#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

/* CFQ specific, out here for blkcg->cfq_weight */
#define CFQ_WEIGHT_MIN		10
#define CFQ_WEIGHT_MAX		1000
#define CFQ_WEIGHT_DEFAULT	500

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq			*blkg_hint;
	struct hlist_head		blkg_list;

	/* TODO: per-policy storage in blkcg */
	unsigned int			cfq_weight;	/* belongs to cfq */
	unsigned int			cfq_leaf_weight;
};

struct blkg_stat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt;
};

struct blkg_rwstat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt[BLKG_RWSTAT_NR];
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each has its private
 * data on each blkg, the size of which is determined by
 * blkcg_policy->pd_size.  blkcg core allocates and frees such areas
 * together with blkg and invokes pd_init/exit_fn() methods.
 *
 * Such private data must embed struct blkg_policy_data (pd) at the
 * beginning and pd_size can't be smaller than pd.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;

	/* used during policy activation */
	struct list_head		alloc_node;
};

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* request allocation list for this blkcg-q pair */
	struct request_list		rl;

	/* reference count */
	atomic_t			refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	struct rcu_head			rcu_head;
};

typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_online_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_offline_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);

struct blkcg_policy {
	int				plid;
	/* policy specific private data size */
	size_t				pd_size;
	/* cgroup files for the policy */
	struct cftype			*cftypes;

	/* operations */
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_exit_pd_fn		*pd_exit_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
};

extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;

struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);

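/*
 * Example (illustrative sketch, not part of this header): a policy
 * named "foo" with hypothetical foo_* symbols.  Its private data
 * embeds struct blkg_policy_data first, as required above, and
 * registration at module init is followed by per-queue activation:
 *
 *	struct foo_group {
 *		struct blkg_policy_data	pd;	// must come first
 *		struct blkg_stat	time;
 *		struct blkg_rwstat	service_bytes;
 *	};
 *
 *	static struct blkcg_policy blkcg_policy_foo = {
 *		.pd_size	= sizeof(struct foo_group),
 *		.cftypes	= foo_files,
 *		.pd_init_fn	= foo_pd_init,
 *		.pd_exit_fn	= foo_pd_exit,
 *	};
 *
 *	ret = blkcg_policy_register(&blkcg_policy_foo);
 *	...
 *	ret = blkcg_activate_policy(q, &blkcg_policy_foo);
 */
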
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);

u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
					     int off);

struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	u64				v;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);

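/*
 * Example (illustrative sketch): a policy's cgroup file write handler
 * can parse a "MAJ:MIN VAL" style @input with blkg_conf_prep(), update
 * its per-blkg state, and then drop the locks the helper acquired via
 * blkg_conf_finish() (the foo_* names are hypothetical):
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, input, &ctx);
 *	if (ret)
 *		return ret;
 *	foo_set_limit(blkg_to_pd(ctx.blkg, &blkcg_policy_foo), ctx.v);
 *	blkg_conf_finish(&ctx);
 */
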
static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
	return css_to_blkcg(task_css(tsk, blkio_cgrp_id));
}

static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_css)
		return css_to_blkcg(bio->bi_css);
	return task_blkcg(current);
}

static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
	return task_get_css(task, blkio_cgrp_id);
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

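/*
 * Example (illustrative sketch): since policy private data embeds
 * struct blkg_policy_data at offset zero, a policy converts between
 * its own type and pd/blkg with container_of() plus the helpers above
 * (struct foo_group and blkcg_policy_foo are hypothetical):
 *
 *	static struct foo_group *pd_to_fg(struct blkg_policy_data *pd)
 *	{
 *		return pd ? container_of(pd, struct foo_group, pd) : NULL;
 *	}
 *
 *	static struct foo_group *blkg_to_fg(struct blkcg_gq *blkg)
 *	{
 *		return pd_to_fg(blkg_to_pd(blkg, &blkcg_policy_foo));
 *	}
 */
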
/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	char *p;

	p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
	if (!p) {
		strncpy(buf, "<unavailable>", buflen);
		return -ENAMETOOLONG;
	}

	memmove(buf, p, buf + buflen - p);
	return 0;
}

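/*
 * Example (illustrative sketch): formatting the owning cgroup's path
 * into a caller-provided buffer, e.g. for a debug message:
 *
 *	char path[128];
 *
 *	if (!blkg_path(blkg, path, sizeof(path)))
 *		pr_debug("blkg %s\n", path);
 */
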
/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	atomic_inc(&blkg->refcnt);
}

void __blkg_release_rcu(struct rcu_head *rcu);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	if (atomic_dec_and_test(&blkg->refcnt))
		call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}

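/*
 * Example (illustrative sketch): a user that caches a blkg pointer
 * beyond the locked region in which it was looked up takes its own
 * reference; the final blkg_put() frees the blkg after an RCU grace
 * period ('foo' is hypothetical):
 *
 *	blkg_get(blkg);		// caller must already hold a reference
 *	foo->blkg = blkg;
 *	...
 *	blkg_put(foo->blkg);	// drop it when the cached copy goes away
 */
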
struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
			       bool update_hint);

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

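/*
 * Example (illustrative sketch): summing a per-blkg counter over
 * @blkg and all of its online descendants (blkg_to_fg() is the
 * hypothetical helper sketched earlier):
 *
 *	struct cgroup_subsys_state *pos_css;
 *	struct blkcg_gq *d_blkg;
 *	u64 total = 0;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(d_blkg, pos_css, blkg)
 *		total += blkg_stat_read(&blkg_to_fg(d_blkg)->time);
 *	rcu_read_unlock();
 */
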
/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

/**
 * blk_get_rl - get request_list to use
 * @q: request_queue of interest
 * @bio: bio which will be attached to the allocated request (may be %NULL)
 *
 * The caller wants to allocate a request from @q to use for @bio.  Find
 * the request_list to use and obtain a reference on it.  Should be called
 * under queue_lock.  This function is guaranteed to return non-%NULL
 * request_list.
 */
static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;

	rcu_read_lock();

	blkcg = bio_blkcg(bio);

	/* bypass blkg lookup and use @q->root_rl directly for root */
	if (blkcg == &blkcg_root)
		goto root_rl;

	/*
	 * Try to use blkg->rl.  blkg lookup may fail under memory pressure
	 * or if either the blkcg or queue is going away.  Fall back to
	 * root_rl in such cases.
	 */
	blkg = blkg_lookup_create(blkcg, q);
	if (unlikely(IS_ERR(blkg)))
		goto root_rl;

	blkg_get(blkg);
	rcu_read_unlock();
	return &blkg->rl;
root_rl:
	rcu_read_unlock();
	return &q->root_rl;
}

/**
 * blk_put_rl - put request_list
 * @rl: request_list to put
 *
 * Put the reference acquired by blk_get_rl().  Should be called under
 * queue_lock.
 */
static inline void blk_put_rl(struct request_list *rl)
{
	/* root_rl may not have blkg set */
	if (rl->blkg && rl->blkg->blkcg != &blkcg_root)
		blkg_put(rl->blkg);
}

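/*
 * Example (illustrative sketch): the request allocation path is
 * expected to pair these helpers, together with blk_rq_set_rl() and
 * blk_rq_rl() below, roughly as follows (both calls under queue_lock):
 *
 *	rl = blk_get_rl(q, bio);	// at request allocation time
 *	blk_rq_set_rl(rq, rl);
 *	...
 *	blk_put_rl(blk_rq_rl(rq));	// when the request is freed
 */
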
/**
 * blk_rq_set_rl - associate a request with a request_list
 * @rq: request of interest
 * @rl: target request_list
 *
 * Associate @rq with @rl so that accounting and freeing can know the
 * request_list @rq came from.
 */
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
{
	rq->rl = rl;
}

/**
 * blk_rq_rl - return the request_list a request came from
 * @rq: request of interest
 *
 * Return the request_list @rq is allocated from.
 */
static inline struct request_list *blk_rq_rl(struct request *rq)
{
	return rq->rl;
}

struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q);
/**
 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
 * @rl: request_list cursor
 * @q: request_queue of interest
 *
 * Should be used under queue_lock.
 */
#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))

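/*
 * Example (illustrative sketch): waking up sleepers on every
 * request_list of a queue, e.g. while tearing the queue down:
 *
 *	struct request_list *rl;
 *
 *	blk_queue_for_each_rl(rl, q) {
 *		wake_up_all(&rl->wait[BLK_RW_SYNC]);
 *		wake_up_all(&rl->wait[BLK_RW_ASYNC]);
 *	}
 */
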
static inline void blkg_stat_init(struct blkg_stat *stat)
{
	u64_stats_init(&stat->syncp);
}

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller is responsible for synchronizing calls to
 * this function.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	u64_stats_update_begin(&stat->syncp);
	stat->cnt += val;
	u64_stats_update_end(&stat->syncp);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 *
 * Read the current value of @stat.  This function can be called without
 * synchronization and takes care of u64 atomicity.
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	unsigned int start;
	uint64_t v;

	do {
		start = u64_stats_fetch_begin_irq(&stat->syncp);
		v = stat->cnt;
	} while (u64_stats_fetch_retry_irq(&stat->syncp, start));

	return v;
}

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	stat->cnt = 0;
}

/**
 * blkg_stat_merge - merge a blkg_stat into another
 * @to: the destination blkg_stat
 * @from: the source
 *
 * Add @from's count to @to.
 */
static inline void blkg_stat_merge(struct blkg_stat *to, struct blkg_stat *from)
{
	blkg_stat_add(to, blkg_stat_read(from));
}

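/*
 * Example (illustrative sketch): a policy initializes a blkg_stat once,
 * updates it from its (already serialized) I/O path, and may read it
 * back locklessly (the foo_group fields are hypothetical):
 *
 *	blkg_stat_init(&fg->time);		// at pd_init_fn() time
 *	blkg_stat_add(&fg->time, elapsed);	// under the policy's lock
 *	v = blkg_stat_read(&fg->time);		// lockless reader
 */
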
static inline void blkg_rwstat_init(struct blkg_rwstat *rwstat)
{
	u64_stats_init(&rwstat->syncp);
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @rw: mask of REQ_{WRITE|SYNC}
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @rw.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   int rw, uint64_t val)
{
	u64_stats_update_begin(&rwstat->syncp);

	if (rw & REQ_WRITE)
		rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_READ] += val;
	if (rw & REQ_SYNC)
		rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;

	u64_stats_update_end(&rwstat->syncp);
}

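/*
 * Example (illustrative sketch): accounting the bytes of a completed
 * request by direction and sync-ness, using the request's command
 * flags as the @rw mask (fg->service_bytes is hypothetical):
 *
 *	blkg_rwstat_add(&fg->service_bytes, rq->cmd_flags,
 *			blk_rq_bytes(rq));
 */
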
/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it as the return value.
 * This function can be called without synchronization and takes care of
 * u64 atomicity.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	unsigned int start;
	struct blkg_rwstat tmp;

	do {
		start = u64_stats_fetch_begin_irq(&rwstat->syncp);
		tmp = *rwstat;
	} while (u64_stats_fetch_retry_irq(&rwstat->syncp, start));

	return tmp;
}

/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}

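/*
 * Example (illustrative sketch): taking one consistent snapshot and
 * deriving per-direction counts from it, plus the combined total:
 *
 *	struct blkg_rwstat tmp = blkg_rwstat_read(&fg->service_bytes);
 *	u64 rbytes = tmp.cnt[BLKG_RWSTAT_READ];
 *	u64 wbytes = tmp.cnt[BLKG_RWSTAT_WRITE];
 *	u64 total = blkg_rwstat_total(&fg->service_bytes);
 */
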
/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
}

/**
 * blkg_rwstat_merge - merge a blkg_rwstat into another
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's counts to @to.
 */
static inline void blkg_rwstat_merge(struct blkg_rwstat *to,
				     struct blkg_rwstat *from)
{
	struct blkg_rwstat v = blkg_rwstat_read(from);
	int i;

	u64_stats_update_begin(&to->syncp);
	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		to->cnt[i] += v.cnt[i];
	u64_stats_update_end(&to->syncp);
}

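/*
 * Example (illustrative sketch): one plausible use is folding a
 * child's counters into its parent when the child goes offline, so
 * that hierarchical totals survive the child's removal:
 *
 *	struct foo_group *parent = blkg_to_fg(blkg->parent);
 *
 *	if (parent)
 *		blkg_rwstat_merge(&parent->service_bytes,
 *				  &blkg_to_fg(blkg)->service_bytes);
 */
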
#else	/* CONFIG_BLK_CGROUP */

struct blkcg {
};

struct blkg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

#define blkcg_root_css	((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
	return NULL;
}

#ifdef CONFIG_BLOCK

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q) { return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline int blkg_path(struct blkcg_gq *blkg, char *buf,
			    int buflen) { return -ENOSYS; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio) { return &q->root_rl; }
static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }

#define blk_queue_for_each_rl(rl, q) \
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLOCK */
#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */