/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/blk-cgroup.h>
#include <linux/backing-dev-defs.h>
#include <linux/slab.h>

static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi)
{
        kref_get(&bdi->refcnt);
        return bdi;
}

void bdi_put(struct backing_dev_info *bdi);

__printf(2, 3)
int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...);
int bdi_register_va(struct backing_dev_info *bdi, const char *fmt,
                    va_list args);
int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner);
void bdi_unregister(struct backing_dev_info *bdi);

struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id);
static inline struct backing_dev_info *bdi_alloc(gfp_t gfp_mask)
{
        return bdi_alloc_node(gfp_mask, NUMA_NO_NODE);
}

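/*
 * Illustrative sketch only (not part of the API above): a driver that owns
 * its backing_dev_info would typically combine these helpers roughly as
 * below.  The format string "example%d", @minor and the error handling are
 * placeholders for this sketch.
 *
 *	struct backing_dev_info *bdi;
 *	int err;
 *
 *	bdi = bdi_alloc(GFP_KERNEL);
 *	if (!bdi)
 *		return -ENOMEM;
 *	err = bdi_register(bdi, "example%d", minor);
 *	if (err) {
 *		bdi_put(bdi);
 *		return err;
 *	}
 *	...
 *	bdi_unregister(bdi);
 *	bdi_put(bdi);			// drops the reference from bdi_alloc()
 */
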
void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
                        bool range_cyclic, enum wb_reason reason);
void wb_start_background_writeback(struct bdi_writeback *wb);
void wb_workfn(struct work_struct *work);
void wb_wakeup_delayed(struct bdi_writeback *wb);

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;

extern struct workqueue_struct *bdi_wq;

static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
{
        return test_bit(WB_has_dirty_io, &wb->state);
}

static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
{
        /*
         * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are
         * any dirty wbs.  See wb_update_write_bandwidth().
         */
        return atomic_long_read(&bdi->tot_write_bandwidth);
}

static inline void __add_wb_stat(struct bdi_writeback *wb,
                                 enum wb_stat_item item, s64 amount)
{
        percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH);
}

static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
        __add_wb_stat(wb, item, 1);
}

static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
        __add_wb_stat(wb, item, -1);
}

static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
        return percpu_counter_read_positive(&wb->stat[item]);
}

static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
{
        return percpu_counter_sum_positive(&wb->stat[item]);
}

extern void wb_writeout_inc(struct bdi_writeback *wb);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long wb_stat_error(struct bdi_writeback *wb)
{
#ifdef CONFIG_SMP
        return nr_cpu_ids * WB_STAT_BATCH;
#else
        return 1;
#endif
}
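
/*
 * Illustrative sketch: wb->stat[] counters are percpu and therefore only
 * approximate when read with wb_stat().  A hypothetical consumer comparing a
 * counter against a threshold ("thresh" below is a placeholder) can leave
 * wb_stat_error() worth of slack, falling back to the exact but more
 * expensive wb_stat_sum() only when the cheap read is inconclusive:
 *
 *	if (wb_stat(wb, WB_RECLAIMABLE) + wb_stat_error(wb) < thresh)
 *		return;				// certainly below the limit
 *	if (wb_stat_sum(wb, WB_RECLAIMABLE) < thresh)
 *		return;				// exact sum confirms it
 */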

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);

/*
 * Flags in backing_dev_info::capability
 *
 * The first three flags control whether dirty pages will contribute to the
 * VM's accounting and whether writepages() should be called for dirty pages
 * (something that would not, for example, be appropriate for ramfs)
 *
 * WARNING: these flags are closely related and should not normally be
 * used separately.  The BDI_CAP_NO_ACCT_AND_WRITEBACK combines these
 * three flags into a single convenience macro.
 *
 * BDI_CAP_NO_ACCT_DIRTY:	Dirty pages shouldn't contribute to accounting
 * BDI_CAP_NO_WRITEBACK:	Don't write pages back
 * BDI_CAP_NO_ACCT_WB:		Don't automatically account writeback pages
 * BDI_CAP_STRICTLIMIT:		Keep number of dirty pages below bdi threshold.
 *
 * BDI_CAP_CGROUP_WRITEBACK:	Supports cgroup-aware writeback.
 */
#define BDI_CAP_NO_ACCT_DIRTY		0x00000001
#define BDI_CAP_NO_WRITEBACK		0x00000002
#define BDI_CAP_NO_ACCT_WB		0x00000004
#define BDI_CAP_STABLE_WRITES		0x00000008
#define BDI_CAP_STRICTLIMIT		0x00000010
#define BDI_CAP_CGROUP_WRITEBACK	0x00000020

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
        (BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)

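/*
 * Illustrative sketch: an in-memory, ramfs-like filesystem whose pages must
 * never be written back or counted against the dirty limits could flag its
 * bdi with the combined capability above, while a device that needs pages
 * kept unmodified while under writeback (e.g. one that checksums data in
 * flight) would add BDI_CAP_STABLE_WRITES.  "bdi" is whichever
 * backing_dev_info the caller owns.
 *
 *	bdi->capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK;
 *
 *	bdi->capabilities |= BDI_CAP_STABLE_WRITES;
 */
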
extern struct backing_dev_info noop_backing_dev_info;

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @wb: bdi_writeback of interest
 *
 * Determine whether there is writeback waiting to be handled against a
 * bdi_writeback.
 */
static inline bool writeback_in_progress(struct bdi_writeback *wb)
{
        return test_bit(WB_writeback_running, &wb->state);
}

static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
        struct super_block *sb;

        if (!inode)
                return &noop_backing_dev_info;

        sb = inode->i_sb;
#ifdef CONFIG_BLOCK
        if (sb_is_blkdev_sb(sb))
                return I_BDEV(inode)->bd_bdi;
#endif
        return sb->s_bdi;
}

static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
{
        struct backing_dev_info *bdi = wb->bdi;

        if (bdi->congested_fn)
                return bdi->congested_fn(bdi->congested_data, cong_bits);
        return wb->congested->state & cong_bits;
}

long congestion_wait(int sync, long timeout);
long wait_iff_congested(struct pglist_data *pgdat, int sync, long timeout);
int pdflush_proc_obsolete(struct ctl_table *table, int write,
                          void __user *buffer, size_t *lenp, loff_t *ppos);

static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi)
{
        return bdi->capabilities & BDI_CAP_STABLE_WRITES;
}

static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
        return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
}

static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
        return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}

static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
{
        /* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
        return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
                                      BDI_CAP_NO_WRITEBACK));
}

static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
        return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host));
}

static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
        return bdi_cap_account_dirty(inode_to_bdi(mapping->host));
}

static inline int bdi_sched_wait(void *word)
{
        schedule();
        return 0;
}

#ifdef CONFIG_CGROUP_WRITEBACK

struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp);
void wb_congested_put(struct bdi_writeback_congested *congested);
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
                                    struct cgroup_subsys_state *memcg_css,
                                    gfp_t gfp);
void wb_memcg_offline(struct mem_cgroup *memcg);
void wb_blkcg_offline(struct blkcg *blkcg);
int inode_congested(struct inode *inode, int cong_bits);

/**
 * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
 * @inode: inode of interest
 *
 * cgroup writeback requires support from both the bdi and filesystem.
 * Also, both memcg and iocg have to be on the default hierarchy.  Test
 * whether all conditions are met.
 *
 * Note that the test result may change dynamically on the same inode
 * depending on how memcg and iocg are configured.
 */
static inline bool inode_cgwb_enabled(struct inode *inode)
{
        struct backing_dev_info *bdi = inode_to_bdi(inode);

        return cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
                cgroup_subsys_on_dfl(io_cgrp_subsys) &&
                bdi_cap_account_dirty(bdi) &&
                (bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) &&
                (inode->i_sb->s_iflags & SB_I_CGROUPWB);
}
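
/*
 * Illustrative sketch: both sides have to opt in before the test above can
 * succeed.  The bdi side advertises BDI_CAP_CGROUP_WRITEBACK in
 * bdi->capabilities, and a filesystem that supports cgroup writeback sets
 * the superblock flag from its mount path, e.g.:
 *
 *	sb->s_iflags |= SB_I_CGROUPWB;
 *
 * On top of that, both the memory and io controllers must be on the default
 * (v2) cgroup hierarchy.
 */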

/**
 * wb_find_current - find wb for %current on a bdi
 * @bdi: bdi of interest
 *
 * Find the wb of @bdi which matches both the memcg and blkcg of %current.
 * Must be called under rcu_read_lock(), which protects the returned wb.
 * Returns NULL if not found.
 */
static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
        struct cgroup_subsys_state *memcg_css;
        struct bdi_writeback *wb;

        memcg_css = task_css(current, memory_cgrp_id);
        if (!memcg_css->parent)
                return &bdi->wb;

        wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);

        /*
         * %current's blkcg equals the effective blkcg of its memcg.  No
         * need to use the relatively expensive cgroup_get_e_css().
         */
        if (likely(wb && wb->blkcg_css == task_css(current, io_cgrp_id)))
                return wb;
        return NULL;
}

/**
 * wb_get_create_current - get or create wb for %current on a bdi
 * @bdi: bdi of interest
 * @gfp: allocation mask
 *
 * Equivalent to wb_get_create() on %current's memcg.  This function is
 * called from a relatively hot path and optimizes the common cases using
 * wb_find_current().
 */
static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
        struct bdi_writeback *wb;

        rcu_read_lock();
        wb = wb_find_current(bdi);
        if (wb && unlikely(!wb_tryget(wb)))
                wb = NULL;
        rcu_read_unlock();

        if (unlikely(!wb)) {
                struct cgroup_subsys_state *memcg_css;

                memcg_css = task_get_css(current, memory_cgrp_id);
                wb = wb_get_create(bdi, memcg_css, gfp);
                css_put(memcg_css);
        }
        return wb;
}

/**
 * inode_to_wb_is_valid - test whether an inode has a wb associated
 * @inode: inode of interest
 *
 * Returns %true if @inode has a wb associated.  May be called without any
 * locking.
 */
static inline bool inode_to_wb_is_valid(struct inode *inode)
{
        return inode->i_wb;
}

/**
 * inode_to_wb - determine the wb of an inode
 * @inode: inode of interest
 *
 * Returns the wb @inode is currently associated with.  The caller must be
 * holding either @inode->i_lock, @inode->i_mapping->tree_lock, or the
 * associated wb's list_lock.
 */
static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
#ifdef CONFIG_LOCKDEP
        WARN_ON_ONCE(debug_locks &&
                     (!lockdep_is_held(&inode->i_lock) &&
                      !lockdep_is_held(&inode->i_mapping->tree_lock) &&
                      !lockdep_is_held(&inode->i_wb->list_lock)));
#endif
        return inode->i_wb;
}

/**
 * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
 * @inode: target inode
 * @lockedp: temp bool output param, to be passed to the end function
 *
 * The caller wants to access the wb associated with @inode but isn't
 * holding inode->i_lock, mapping->tree_lock or wb->list_lock.  This
 * function determines the wb associated with @inode and ensures that the
 * association doesn't change until the transaction is finished with
 * unlocked_inode_to_wb_end().
 *
 * The caller must call unlocked_inode_to_wb_end() with *@lockedp
 * afterwards and can't sleep during the transaction.  IRQ may or may not be
 * disabled on return.
 */
static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
{
        rcu_read_lock();

        /*
         * Paired with store_release in inode_switch_wbs_work_fn() and
         * ensures that we see the new wb if we see cleared I_WB_SWITCH.
         */
        *lockedp = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;

        if (unlikely(*lockedp))
                spin_lock_irq(&inode->i_mapping->tree_lock);

        /*
         * Protected by either !I_WB_SWITCH + rcu_read_lock() or tree_lock.
         * inode_to_wb()'s lockdep check would bark here, so deref i_wb directly.
         */
        return inode->i_wb;
}

/**
 * unlocked_inode_to_wb_end - end inode wb access transaction
 * @inode: target inode
 * @locked: *@lockedp from unlocked_inode_to_wb_begin()
 */
static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
{
        if (unlikely(locked))
                spin_unlock_irq(&inode->i_mapping->tree_lock);

        rcu_read_unlock();
}
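
/*
 * Illustrative sketch of the transaction above: a hypothetical caller that
 * wants to update a per-wb counter without holding i_lock, tree_lock or the
 * wb's list_lock could do:
 *
 *	struct bdi_writeback *wb;
 *	bool locked;
 *
 *	wb = unlocked_inode_to_wb_begin(inode, &locked);
 *	__add_wb_stat(wb, WB_WRITTEN, 1);	// wb can't be switched away here
 *	unlocked_inode_to_wb_end(inode, locked);
 *
 * Sleeping is not allowed between _begin() and _end(), and interrupts may be
 * disabled on return from _begin() (they are whenever *lockedp was set).
 */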

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline bool inode_cgwb_enabled(struct inode *inode)
{
        return false;
}

static inline struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
{
        atomic_inc(&bdi->wb_congested->refcnt);
        return bdi->wb_congested;
}

static inline void wb_congested_put(struct bdi_writeback_congested *congested)
{
        if (atomic_dec_and_test(&congested->refcnt))
                kfree(congested);
}

static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
        return &bdi->wb;
}

static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
        return &bdi->wb;
}

static inline bool inode_to_wb_is_valid(struct inode *inode)
{
        return true;
}

static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
        return &inode_to_bdi(inode)->wb;
}

static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
{
        return inode_to_wb(inode);
}

static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
{
}

static inline void wb_memcg_offline(struct mem_cgroup *memcg)
{
}

static inline void wb_blkcg_offline(struct blkcg *blkcg)
{
}

static inline int inode_congested(struct inode *inode, int cong_bits)
{
        return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

static inline int inode_read_congested(struct inode *inode)
{
        return inode_congested(inode, 1 << WB_sync_congested);
}

static inline int inode_write_congested(struct inode *inode)
{
        return inode_congested(inode, 1 << WB_async_congested);
}

static inline int inode_rw_congested(struct inode *inode)
{
        return inode_congested(inode, (1 << WB_sync_congested) |
                                      (1 << WB_async_congested));
}

static inline int bdi_congested(struct backing_dev_info *bdi, int cong_bits)
{
        return wb_congested(&bdi->wb, cong_bits);
}

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
        return bdi_congested(bdi, 1 << WB_sync_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
        return bdi_congested(bdi, 1 << WB_async_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
        return bdi_congested(bdi, (1 << WB_sync_congested) |
                                  (1 << WB_async_congested));
}
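
/*
 * Illustrative sketch: the congestion tests above are hints, typically used
 * to skip or defer optional work.  A hypothetical readahead-style path might
 * bail out early instead of piling more reads onto a congested device:
 *
 *	if (bdi_read_congested(inode_to_bdi(inode)))
 *		return;		// skip optional readahead for now
 */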

#endif	/* _LINUX_BACKING_DEV_H */