/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/percpu_counter.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>
#include <linux/kthread.h>

/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH	(INT_MAX / 2)

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq	__rcu		*blkg_hint;
	struct hlist_head		blkg_list;

	struct blkcg_policy_data	*cpd[BLKCG_MAX_POLS];

	struct list_head		all_blkcgs_node;
#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head		cgwb_list;
#endif
};

/*
 * blkg_[rw]stat->aux_cnt is excluded for local stats but included for
 * recursive ones.  It is used to carry stats of dead children, and, for
 * blkg_rwstat, to carry result values from read and sum operations.
 */
struct blkg_stat {
	struct percpu_counter		cpu_cnt;
	atomic64_t			aux_cnt;
};

struct blkg_rwstat {
	struct percpu_counter		cpu_cnt[BLKG_RWSTAT_NR];
	atomic64_t			aux_cnt[BLKG_RWSTAT_NR];
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate a private
 * data area by allocating a larger data structure which embeds
 * blkg_policy_data at the beginning.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;
	bool				offline;
};

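/*
 * For illustration only (not part of the upstream header): a hypothetical
 * policy could embed blkg_policy_data at the start of its private per-blkg
 * structure and convert between the two as below.  All "my_*" names are
 * assumptions, not real kernel symbols:
 *
 *	struct my_blkg_data {
 *		struct blkg_policy_data pd;	// must be the first member
 *		u64			nr_dispatched;
 *	};
 *
 *	static struct blkg_policy_data *my_pd_alloc(gfp_t gfp, int node)
 *	{
 *		struct my_blkg_data *md = kzalloc_node(sizeof(*md), gfp, node);
 *
 *		return md ? &md->pd : NULL;
 *	}
 *
 *	static void my_pd_free(struct blkg_policy_data *pd)
 *	{
 *		kfree(container_of(pd, struct my_blkg_data, pd));
 *	}
 */
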
/*
 * Policies that need to keep per-blkcg data which is independent of any
 * request_queue associated with it should implement cpd_alloc/free_fn()
 * methods.  A policy can allocate a private data area by allocating a
 * larger data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
	/* the blkcg and policy id this per-policy data belongs to */
	struct blkcg			*blkcg;
	int				plid;
};

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/*
	 * Each blkg gets congested separately and the congestion state is
	 * propagated to the matching bdi_writeback_congested.
	 */
	struct bdi_writeback_congested	*wb_congested;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* request allocation list for this blkcg-q pair */
	struct request_list		rl;

	/* reference count */
	atomic_t			refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_rwstat		stat_bytes;
	struct blkg_rwstat		stat_ios;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	struct rcu_head			rcu_head;

	atomic_t			use_delay;
	atomic64_t			delay_nsec;
	atomic64_t			delay_start;
	u64				last_delay;
	int				last_use;
};

typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, int node);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
typedef size_t (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd, char *buf,
				      size_t size);

struct blkcg_policy {
	int				plid;
	/* cgroup files for the policy */
	struct cftype			*dfl_cftypes;
	struct cftype			*legacy_cftypes;

	/* operations */
	blkcg_pol_alloc_cpd_fn		*cpd_alloc_fn;
	blkcg_pol_init_cpd_fn		*cpd_init_fn;
	blkcg_pol_free_cpd_fn		*cpd_free_fn;
	blkcg_pol_bind_cpd_fn		*cpd_bind_fn;

	blkcg_pol_alloc_pd_fn		*pd_alloc_fn;
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_free_pd_fn		*pd_free_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
	blkcg_pol_stat_pd_fn		*pd_stat_fn;
};

extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);

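/*
 * Illustrative sketch (not in the upstream header): a minimal policy fills
 * in a blkcg_policy with its pd methods, registers it at init time, and is
 * activated per-queue.  The "my_*" names are hypothetical:
 *
 *	static struct blkcg_policy my_policy = {
 *		.pd_alloc_fn	= my_pd_alloc,
 *		.pd_init_fn	= my_pd_init,
 *		.pd_free_fn	= my_pd_free,
 *	};
 *
 *	static int __init my_init(void)
 *	{
 *		// assigns my_policy.plid on success
 *		return blkcg_policy_register(&my_policy);
 *	}
 *
 * A queue starts carrying pd's for the policy only after
 * blkcg_activate_policy(q, &my_policy) succeeds for that queue.
 */
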
const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);
int blkg_print_stat_bytes(struct seq_file *sf, void *v);
int blkg_print_stat_ios(struct seq_file *sf, void *v);
int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v);
int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v);

u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
			    struct blkcg_policy *pol, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
					     struct blkcg_policy *pol, int off);

struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	char				*body;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);

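/*
 * Typical (sketched, hypothetical) use of blkg_conf_prep()/finish() from a
 * cftype write handler: prep parses the "MAJ:MIN ..." prefix of @buf, looks
 * up the blkg and returns with the queue lock held; finish releases it:
 *
 *	static ssize_t my_set_limit(struct kernfs_open_file *of, char *buf,
 *				    size_t nbytes, loff_t off)
 *	{
 *		struct blkcg *blkcg = css_to_blkcg(of_css(of));
 *		struct blkg_conf_ctx ctx;
 *		u64 val;
 *		int ret;
 *
 *		ret = blkg_conf_prep(blkcg, &my_policy, buf, &ctx);
 *		if (ret)
 *			return ret;
 *
 *		ret = -EINVAL;
 *		if (sscanf(ctx.body, "%llu", &val) == 1) {
 *			my_apply_limit(blkg_to_pd(ctx.blkg, &my_policy), val);
 *			ret = 0;
 *		}
 *		blkg_conf_finish(&ctx);
 *		return ret ?: nbytes;
 *	}
 */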

static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	struct cgroup_subsys_state *css;

	if (bio && bio->bi_css)
		return css_to_blkcg(bio->bi_css);
	css = kthread_blkcg();
	if (css)
		return css_to_blkcg(css);
	return css_to_blkcg(task_css(current, io_cgrp_id));
}

static inline bool blk_cgroup_congested(void)
{
	struct cgroup_subsys_state *css;
	bool ret = false;

	rcu_read_lock();
	css = kthread_blkcg();
	if (!css)
		css = task_css(current, io_cgrp_id);
	while (css) {
		if (atomic_read(&css->cgroup->congestion_count)) {
			ret = true;
			break;
		}
		css = css->parent;
	}
	rcu_read_unlock();
	return ret;
}

/**
 * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
 * @bio: bio of interest
 *
 * Return: true if this bio needs to be submitted with the root blkg context.
 *
 * In order to avoid priority inversions we sometimes need to issue a bio as
 * if it were attached to the root blkg, and then backcharge to the actual
 * owning blkg.  The idea is we do bio_blkcg() to look up the actual context
 * for the bio and attach the appropriate blkg to the bio.  Then we call this
 * helper and if it is true run with the root blkg for that queue and then do
 * any backcharging to the originating cgroup once the io is complete.
 */
static inline bool bio_issue_as_root_blkg(struct bio *bio)
{
	return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}

/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is the internal version and shouldn't be used by policy
 * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
 * @q's bypass state.  If @update_hint is %true, the caller should be
 * holding @q->queue_lock and lookup hint is updated on success.
 */
static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
					     struct request_queue *q,
					     bool update_hint)
{
	struct blkcg_gq *blkg;

	if (blkcg == &blkcg_root)
		return q->root_blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	return blkg_lookup_slowpath(blkcg, q, update_hint);
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Look up blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
 * - see blk_queue_bypass_start() for details.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (unlikely(blk_queue_bypass(q)))
		return NULL;
	return __blkg_lookup(blkcg, q, false);
}

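/*
 * Sketch of a typical hot-path lookup (illustrative only): the caller holds
 * the RCU read lock and must not use the blkg past the critical section
 * unless it pins it with a reference first:
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(bio_blkcg(bio), q);
 *	if (blkg)
 *		blkg = blkg_try_get(blkg);	// optional: pin past RCU
 *	rcu_read_unlock();
 */
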
/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->cpd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
	return cpd ? cpd->blkcg : NULL;
}

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	atomic_inc(&blkg->refcnt);
}

/**
 * blkg_try_get - try and get a blkg reference
 * @blkg: blkg to get
 *
 * This is for use when doing an RCU lookup of the blkg.  We may be in the
 * midst of freeing this blkg, so we can only use it if the refcnt is not
 * zero.
 */
static inline struct blkcg_gq *blkg_try_get(struct blkcg_gq *blkg)
{
	if (atomic_inc_not_zero(&blkg->refcnt))
		return blkg;
	return NULL;
}

void __blkg_release_rcu(struct rcu_head *rcu);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	if (atomic_dec_and_test(&blkg->refcnt))
		call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

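/*
 * Illustrative walk (not from the upstream header): visit every online
 * descendant of @blkg, including @blkg itself, under RCU:
 *
 *	struct cgroup_subsys_state *pos_css;
 *	struct blkcg_gq *pos;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(pos, pos_css, blkg)
 *		my_update_one(pos);	// hypothetical per-blkg hook
 *	rcu_read_unlock();
 */
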
/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

/**
 * blk_get_rl - get request_list to use
 * @q: request_queue of interest
 * @bio: bio which will be attached to the allocated request (may be %NULL)
 *
 * The caller wants to allocate a request from @q to use for @bio.  Find
 * the request_list to use and obtain a reference on it.  Should be called
 * under queue_lock.  This function is guaranteed to return non-%NULL
 * request_list.
 */
static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;

	rcu_read_lock();

	blkcg = bio_blkcg(bio);

	/* bypass blkg lookup and use @q->root_rl directly for root */
	if (blkcg == &blkcg_root)
		goto root_rl;

	/*
	 * Try to use blkg->rl.  blkg lookup may fail under memory pressure
	 * or if either the blkcg or queue is going away.  Fall back to
	 * root_rl in such cases.
	 */
	blkg = blkg_lookup(blkcg, q);
	if (unlikely(!blkg))
		goto root_rl;

	blkg_get(blkg);
	rcu_read_unlock();
	return &blkg->rl;
root_rl:
	rcu_read_unlock();
	return &q->root_rl;
}

/**
 * blk_put_rl - put request_list
 * @rl: request_list to put
 *
 * Put the reference acquired by blk_get_rl().  Should be called under
 * queue_lock.
 */
static inline void blk_put_rl(struct request_list *rl)
{
	if (rl->blkg->blkcg != &blkcg_root)
		blkg_put(rl->blkg);
}

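/*
 * Sketch of the pairing expected by the request allocation path
 * (illustrative only; real callers live in the block core):
 *
 *	spin_lock_irq(q->queue_lock);
 *	rl = blk_get_rl(q, bio);		// never returns NULL
 *	... allocate a request, blk_rq_set_rl(rq, rl) ...
 *	... later, when the request is freed ...
 *	blk_put_rl(blk_rq_rl(rq));
 *	spin_unlock_irq(q->queue_lock);
 */
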
/**
 * blk_rq_set_rl - associate a request with a request_list
 * @rq: request of interest
 * @rl: target request_list
 *
 * Associate @rq with @rl so that accounting and freeing can know the
 * request_list @rq came from.
 */
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
{
	rq->rl = rl;
}

/**
 * blk_rq_rl - return the request_list a request came from
 * @rq: request of interest
 *
 * Return the request_list @rq is allocated from.
 */
static inline struct request_list *blk_rq_rl(struct request *rq)
{
	return rq->rl;
}

struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q);
/**
 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
 * @rl: request_list cursor
 * @q: request_queue of interest
 *
 * Should be used under queue_lock.
 */
#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))

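/*
 * For example (illustrative), touching every request_list of a queue:
 *
 *	struct request_list *rl;
 *
 *	spin_lock_irq(q->queue_lock);
 *	blk_queue_for_each_rl(rl, q)
 *		my_drain_rl(rl);	// hypothetical helper
 *	spin_unlock_irq(q->queue_lock);
 */
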
static inline int blkg_stat_init(struct blkg_stat *stat, gfp_t gfp)
{
	int ret;

	ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
	if (ret)
		return ret;

	atomic64_set(&stat->aux_cnt, 0);
	return 0;
}

static inline void blkg_stat_exit(struct blkg_stat *stat)
{
	percpu_counter_destroy(&stat->cpu_cnt);
}

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller must ensure that IRQs on the same CPU
 * don't re-enter this function for the same counter.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	return percpu_counter_sum_positive(&stat->cpu_cnt);
}

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	percpu_counter_set(&stat->cpu_cnt, 0);
	atomic64_set(&stat->aux_cnt, 0);
}

/**
 * blkg_stat_add_aux - add a blkg_stat into another's aux count
 * @to: the destination blkg_stat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_stat_add_aux(struct blkg_stat *to,
				     struct blkg_stat *from)
{
	atomic64_add(blkg_stat_read(from) + atomic64_read(&from->aux_cnt),
		     &to->aux_cnt);
}

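/*
 * Lifecycle sketch (illustrative): a policy's pd would typically embed a
 * blkg_stat and drive it like this:
 *
 *	struct blkg_stat st;
 *
 *	if (blkg_stat_init(&st, GFP_KERNEL))
 *		return -ENOMEM;
 *	blkg_stat_add(&st, 1);			// account one event
 *	total = blkg_stat_read(&st);		// sum of per-cpu counts
 *	blkg_stat_exit(&st);			// frees the percpu counter
 */
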
static inline int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp)
{
	int i, ret;

	for (i = 0; i < BLKG_RWSTAT_NR; i++) {
		ret = percpu_counter_init(&rwstat->cpu_cnt[i], 0, gfp);
		if (ret) {
			while (--i >= 0)
				percpu_counter_destroy(&rwstat->cpu_cnt[i]);
			return ret;
		}
		atomic64_set(&rwstat->aux_cnt[i], 0);
	}
	return 0;
}

static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat)
{
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		percpu_counter_destroy(&rwstat->cpu_cnt[i]);
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @op: REQ_OP and flags
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @op.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   unsigned int op, uint64_t val)
{
	struct percpu_counter *cnt;

	if (op_is_write(op))
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
	else
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];

	percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);

	if (op_is_sync(op))
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
	else
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];

	percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);
}

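/*
 * For instance (illustrative), accounting a bio's bytes and a single IO
 * against a policy's private rwstats, keyed by the bio's op flags:
 *
 *	blkg_rwstat_add(&my_pd->bytes, bio->bi_opf, bio->bi_iter.bi_size);
 *	blkg_rwstat_add(&my_pd->ios, bio->bi_opf, 1);
 *
 * This mirrors what blkcg_bio_issue_check() below does with the generic
 * stat_bytes and stat_ios counters.
 */
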
/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it in the aux counts.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat result;
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		atomic64_set(&result.aux_cnt[i],
			     percpu_counter_sum_positive(&rwstat->cpu_cnt[i]));
	return result;
}

/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
		atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++) {
		percpu_counter_set(&rwstat->cpu_cnt[i], 0);
		atomic64_set(&rwstat->aux_cnt[i], 0);
	}
}

/**
 * blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
				       struct blkg_rwstat *from)
{
	u64 sum[BLKG_RWSTAT_NR];
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		sum[i] = percpu_counter_sum_positive(&from->cpu_cnt[i]);

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		atomic64_add(sum[i] + atomic64_read(&from->aux_cnt[i]),
			     &to->aux_cnt[i]);
}

#ifdef CONFIG_BLK_DEV_THROTTLING
extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
			   struct bio *bio);
#else
static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
				  struct bio *bio) { return false; }
#endif

static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;
	bool throtl = false;

	rcu_read_lock();
	blkcg = bio_blkcg(bio);

	/* associate blkcg if bio hasn't attached one */
	bio_associate_blkcg(bio, &blkcg->css);

	blkg = blkg_lookup(blkcg, q);
	if (unlikely(!blkg)) {
		spin_lock_irq(q->queue_lock);
		blkg = blkg_lookup_create(blkcg, q);
		if (IS_ERR(blkg))
			blkg = NULL;
		spin_unlock_irq(q->queue_lock);
	}

	throtl = blk_throtl_bio(q, blkg, bio);

	if (!throtl) {
		blkg = blkg ?: q->root_blkg;
		blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf,
				bio->bi_iter.bi_size);
		blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
	}

	rcu_read_unlock();
	return !throtl;
}

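/*
 * Usage sketch (illustrative): the submission path calls this once per bio
 * and must not dispatch a throttled bio; blk-throttle has taken ownership
 * of it and will resubmit it later:
 *
 *	if (!blkcg_bio_issue_check(q, bio))
 *		return;		// bio was throttled, don't dispatch it
 */
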
static inline void blkcg_use_delay(struct blkcg_gq *blkg)
{
	if (atomic_add_return(1, &blkg->use_delay) == 1)
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
}

static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	if (old == 0)
		return 0;

	/*
	 * We do this song and dance because we can race with somebody else
	 * adding or removing delay.  If we just did an atomic_dec we'd end up
	 * negative and we'd already be in trouble.  We need to subtract 1 and
	 * then check to see if we were the last delay so we can drop the
	 * congestion count on the cgroup.
	 */
	while (old) {
		int cur = atomic_cmpxchg(&blkg->use_delay, old, old - 1);
		if (cur == old)
			break;
		old = cur;
	}

	if (old == 0)
		return 0;
	if (old == 1)
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
	return 1;
}

static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	if (!old)
		return;

	/* We only want 1 person clearing the congestion count for this blkg. */
	while (old) {
		int cur = atomic_cmpxchg(&blkg->use_delay, old, 0);
		if (cur == old) {
			atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
			break;
		}
		old = cur;
	}
}

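/*
 * Putting it together (hedged sketch): a policy that detects a congested
 * blkg could mark it delayed, charge accumulated delay, and let throttled
 * tasks pick it up on their way back to userspace:
 *
 *	blkcg_use_delay(blkg);
 *	blkcg_add_delay(blkg, ktime_to_ns(ktime_get()), delta_nsec);
 *	...
 *	blkcg_schedule_throttle(q, false);
 *
 * blkcg_maybe_throttle_current() is the exit-to-userspace hook that applies
 * the accumulated delay.  The helpers are declared below.
 */
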
void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
void blkcg_maybe_throttle_current(void);
#else	/* CONFIG_BLK_CGROUP */

struct blkcg {
};

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

#define blkcg_root_css	((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

static inline void blkcg_maybe_throttle_current(void) { }
static inline bool blk_cgroup_congested(void) { return false; }

#ifdef CONFIG_BLOCK

static inline void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) { }

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio) { return &q->root_rl; }
static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }

static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio) { return true; }

#define blk_queue_for_each_rl(rl, q) \
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLOCK */
#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */