#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

/* CFQ specific, out here for blkcg->cfq_weight */
#define CFQ_WEIGHT_MIN		10
#define CFQ_WEIGHT_MAX		1000
#define CFQ_WEIGHT_DEFAULT	500

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq			*blkg_hint;
	struct hlist_head		blkg_list;

	/* TODO: per-policy storage in blkcg */
	unsigned int			cfq_weight;	/* belongs to cfq */
	unsigned int			cfq_leaf_weight;

#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head		cgwb_list;
#endif
};

struct blkg_stat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt;
};

struct blkg_rwstat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt[BLKG_RWSTAT_NR];
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each has its private
 * data on each blkg, the size of which is determined by
 * blkcg_policy->pd_size.  blkcg core allocates and frees such areas
 * together with blkg and invokes pd_init/exit_fn() methods.
 *
 * Such private data must embed struct blkg_policy_data (pd) at the
 * beginning and pd_size can't be smaller than pd.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;

	/* used during policy activation */
	struct list_head		alloc_node;
};

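/*
 * Illustrative sketch (hypothetical, not part of this header): a policy's
 * per-blkg data would typically embed struct blkg_policy_data as its first
 * member so that the blkg_to_pd()/pd_to_blkg() helpers below apply to it,
 * and pd_size would cover the whole wrapper.  All names here are made up.
 *
 *	struct example_pd {
 *		struct blkg_policy_data	pd;		// must be first
 *		u64			nr_dispatched;	// policy-private state
 *	};
 *
 *	// the wrapper is recovered from the embedded pd with container_of():
 *	//	struct example_pd *epd = container_of(pd, struct example_pd, pd);
 */
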
/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* request allocation list for this blkcg-q pair */
	struct request_list		rl;

	/* reference count */
	atomic_t			refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	struct rcu_head			rcu_head;
};

typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_online_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_offline_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);

struct blkcg_policy {
	int				plid;
	/* policy specific private data size */
	size_t				pd_size;
	/* cgroup files for the policy */
	struct cftype			*cftypes;

	/* operations */
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_exit_pd_fn		*pd_exit_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
};

extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;

struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);

void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);

u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
					     int off);

struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	u64				v;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);

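/*
 * Usage sketch for the conf helpers above (illustrative only): a policy's
 * cftype write handler would typically bracket its per-blkg update with
 * blkg_conf_prep()/blkg_conf_finish().  example_policy is a made-up name.
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	ret = blkg_conf_prep(blkcg, &example_policy, input, &ctx);
 *	if (ret)
 *		return ret;
 *	// ctx.blkg and ctx.v (the parsed value) are valid here;
 *	// apply the new setting to ctx.blkg, then release with:
 *	blkg_conf_finish(&ctx);
 */
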
static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
	return css_to_blkcg(task_css(tsk, blkio_cgrp_id));
}

static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_css)
		return css_to_blkcg(bio->bi_css);
	return task_blkcg(current);
}

static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
	return task_get_css(task, blkio_cgrp_id);
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	char *p;

	p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
	if (!p) {
		strncpy(buf, "<unavailable>", buflen);
		return -ENAMETOOLONG;
	}

	memmove(buf, p, buf + buflen - p);
	return 0;
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	atomic_inc(&blkg->refcnt);
}

void __blkg_release_rcu(struct rcu_head *rcu);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	if (atomic_dec_and_test(&blkg->refcnt))
		call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}

struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
			       bool update_hint);

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

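/*
 * Illustrative use of the pre-order walk above (a hedged sketch; the helper
 * example_update_blkg() and the starting blkg parent_blkg are hypothetical):
 *
 *	struct cgroup_subsys_state *pos_css;
 *	struct blkcg_gq *blkg;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(blkg, pos_css, parent_blkg)
 *		example_update_blkg(blkg);
 *	rcu_read_unlock();
 */
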
/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

/**
 * blk_get_rl - get request_list to use
 * @q: request_queue of interest
 * @bio: bio which will be attached to the allocated request (may be %NULL)
 *
 * The caller wants to allocate a request from @q to use for @bio.  Find
 * the request_list to use and obtain a reference on it.  Should be called
 * under queue_lock.  This function is guaranteed to return non-%NULL
 * request_list.
 */
static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;

	rcu_read_lock();

	blkcg = bio_blkcg(bio);

	/* bypass blkg lookup and use @q->root_rl directly for root */
	if (blkcg == &blkcg_root)
		goto root_rl;

	/*
	 * Try to use blkg->rl.  blkg lookup may fail under memory pressure
	 * or if either the blkcg or queue is going away.  Fall back to
	 * root_rl in such cases.
	 */
	blkg = blkg_lookup_create(blkcg, q);
	if (unlikely(IS_ERR(blkg)))
		goto root_rl;

	blkg_get(blkg);
	rcu_read_unlock();
	return &blkg->rl;
root_rl:
	rcu_read_unlock();
	return &q->root_rl;
}

/**
 * blk_put_rl - put request_list
 * @rl: request_list to put
 *
 * Put the reference acquired by blk_get_rl().  Should be called under
 * queue_lock.
 */
static inline void blk_put_rl(struct request_list *rl)
{
	/* root_rl may not have blkg set */
	if (rl->blkg && rl->blkg->blkcg != &blkcg_root)
		blkg_put(rl->blkg);
}

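/*
 * Hedged sketch of the expected pairing (the surrounding allocation and
 * free paths are simplified): the request_list is looked up under
 * queue_lock, tied to the request via blk_rq_set_rl() below, and the
 * reference is dropped when the request is freed.
 *
 *	rl = blk_get_rl(q, bio);	// under queue_lock
 *	blk_rq_set_rl(rq, rl);
 *	...
 *	blk_put_rl(blk_rq_rl(rq));	// under queue_lock, when freeing rq
 */
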
/**
 * blk_rq_set_rl - associate a request with a request_list
 * @rq: request of interest
 * @rl: target request_list
 *
 * Associate @rq with @rl so that accounting and freeing can know the
 * request_list @rq came from.
 */
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
{
	rq->rl = rl;
}

/**
 * blk_rq_rl - return the request_list a request came from
 * @rq: request of interest
 *
 * Return the request_list @rq is allocated from.
 */
static inline struct request_list *blk_rq_rl(struct request *rq)
{
	return rq->rl;
}

struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q);
/**
 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
 *
 * Should be used under queue_lock.
 */
#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))

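/*
 * Illustrative use of blk_queue_for_each_rl() (the caller and the helper
 * example_drain_rl() are hypothetical): walk every request_list hanging
 * off @q, e.g. to drain or account them, with queue_lock held.
 *
 *	struct request_list *rl;
 *
 *	blk_queue_for_each_rl(rl, q)
 *		example_drain_rl(rl);
 */
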
static inline void blkg_stat_init(struct blkg_stat *stat)
{
	u64_stats_init(&stat->syncp);
}

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller is responsible for synchronizing calls to
 * this function.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	u64_stats_update_begin(&stat->syncp);
	stat->cnt += val;
	u64_stats_update_end(&stat->syncp);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 *
 * Read the current value of @stat.  This function can be called without
 * synchronization and takes care of u64 atomicity.
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	unsigned int start;
	uint64_t v;

	do {
		start = u64_stats_fetch_begin_irq(&stat->syncp);
		v = stat->cnt;
	} while (u64_stats_fetch_retry_irq(&stat->syncp, start));

	return v;
}

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	stat->cnt = 0;
}

/**
 * blkg_stat_merge - merge a blkg_stat into another
 * @to: the destination blkg_stat
 * @from: the source
 *
 * Add @from's count to @to.
 */
static inline void blkg_stat_merge(struct blkg_stat *to, struct blkg_stat *from)
{
	blkg_stat_add(to, blkg_stat_read(from));
}

static inline void blkg_rwstat_init(struct blkg_rwstat *rwstat)
{
	u64_stats_init(&rwstat->syncp);
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @rw: mask of REQ_{WRITE|SYNC}
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @rw.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   int rw, uint64_t val)
{
	u64_stats_update_begin(&rwstat->syncp);

	if (rw & REQ_WRITE)
		rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_READ] += val;
	if (rw & REQ_SYNC)
		rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;

	u64_stats_update_end(&rwstat->syncp);
}

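/*
 * Example of how @rw selects counters in blkg_rwstat_add() (illustrative;
 * the rwstat and bytes variables are placeholders): a synchronous write
 * bumps both the WRITE and SYNC buckets, while a plain read bumps READ
 * and ASYNC.
 *
 *	blkg_rwstat_add(&rwstat, REQ_WRITE | REQ_SYNC, bytes);
 *	blkg_rwstat_add(&rwstat, 0, bytes);	// read, async
 */
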
/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it as the return value.
 * This function can be called without synchronization and takes care of
 * u64 atomicity.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	unsigned int start;
	struct blkg_rwstat tmp;

	do {
		start = u64_stats_fetch_begin_irq(&rwstat->syncp);
		tmp = *rwstat;
	} while (u64_stats_fetch_retry_irq(&rwstat->syncp, start));

	return tmp;
}

/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
}

/**
 * blkg_rwstat_merge - merge a blkg_rwstat into another
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's counts to @to.
 */
static inline void blkg_rwstat_merge(struct blkg_rwstat *to,
				     struct blkg_rwstat *from)
{
	struct blkg_rwstat v = blkg_rwstat_read(from);
	int i;

	u64_stats_update_begin(&to->syncp);
	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		to->cnt[i] += v.cnt[i];
	u64_stats_update_end(&to->syncp);
}

#else	/* CONFIG_BLK_CGROUP */

struct blkcg {
};

struct blkg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

#define blkcg_root_css	((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
	return NULL;
}

#ifdef CONFIG_BLOCK

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio) { return &q->root_rl; }
static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }

#define blk_queue_for_each_rl(rl, q) \
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLOCK */
#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */