/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/blk-cgroup.h>
#include <linux/backing-dev-defs.h>

int __must_check bdi_init(struct backing_dev_info *bdi);
void bdi_destroy(struct backing_dev_info *bdi);

__printf(3, 4)
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
void bdi_unregister(struct backing_dev_info *bdi);
int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
			enum wb_reason reason);
void bdi_start_background_writeback(struct backing_dev_info *bdi);
void wb_workfn(struct work_struct *work);
void wb_wakeup_delayed(struct bdi_writeback *wb);

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;

extern struct workqueue_struct *bdi_wq;

static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
{
	return test_bit(WB_has_dirty_io, &wb->state);
}

static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
{
	/*
	 * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are
	 * any dirty wbs.  See wb_update_write_bandwidth().
	 */
	return atomic_long_read(&bdi->tot_write_bandwidth);
}

static inline void __add_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item, s64 amount)
{
	__percpu_counter_add(&wb->stat[item], amount, WB_STAT_BATCH);
}

static inline void __inc_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item)
{
	__add_wb_stat(wb, item, 1);
}

static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_wb_stat(wb, item);
	local_irq_restore(flags);
}

static inline void __dec_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item)
{
	__add_wb_stat(wb, item, -1);
}

static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_wb_stat(wb, item);
	local_irq_restore(flags);
}

static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_read_positive(&wb->stat[item]);
}

static inline s64 __wb_stat_sum(struct bdi_writeback *wb,
				enum wb_stat_item item)
{
	return percpu_counter_sum_positive(&wb->stat[item]);
}

static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
{
	s64 sum;
	unsigned long flags;

	local_irq_save(flags);
	sum = __wb_stat_sum(wb, item);
	local_irq_restore(flags);

	return sum;
}

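/*
 * Illustrative sketch (not part of this header): how the accounting
 * helpers above are typically paired on the writeback completion path.
 * Assumes the WB_RECLAIMABLE and WB_WRITTEN items from enum wb_stat_item
 * in backing-dev-defs.h; the function itself is hypothetical.
 *
 *	static void example_account_page_cleaned(struct bdi_writeback *wb)
 *	{
 *		dec_wb_stat(wb, WB_RECLAIMABLE);	// page no longer dirty
 *		inc_wb_stat(wb, WB_WRITTEN);		// one more page written
 *	}
 *
 * The __-prefixed variants skip the local_irq_save()/restore() pair and
 * may only be used when interrupts are already disabled.
 */
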
extern void wb_writeout_inc(struct bdi_writeback *wb);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long wb_stat_error(struct bdi_writeback *wb)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * WB_STAT_BATCH;
#else
	return 1;
#endif
}
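
/*
 * Illustrative sketch (not part of this header): because wb_stat() reads
 * the per-cpu counter without summing, a caller comparing it against a
 * limit should allow wb_stat_error() of slack before paying for an exact
 * sum, e.g.:
 *
 *	// hypothetical check, in the spirit of the dirty-throttling code
 *	if (wb_stat(wb, WB_RECLAIMABLE) < limit - wb_stat_error(wb))
 *		return;		// clearly below the limit
 *	if (wb_stat_sum(wb, WB_RECLAIMABLE) < limit)
 *		return;		// exact sum confirms we are below
 */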

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);

/*
 * Flags in backing_dev_info::capability
 *
 * The first three flags control whether dirty pages will contribute to the
 * VM's accounting and whether writepages() should be called for dirty pages
 * (something that would not, for example, be appropriate for ramfs).
 *
 * WARNING: these flags are closely related and should not normally be
 * used separately.  The BDI_CAP_NO_ACCT_AND_WRITEBACK macro combines
 * these three flags into a single convenience macro.
 *
 * BDI_CAP_NO_ACCT_DIRTY:  Dirty pages shouldn't contribute to accounting
 * BDI_CAP_NO_WRITEBACK:   Don't write pages back
 * BDI_CAP_NO_ACCT_WB:     Don't automatically account writeback pages
 * BDI_CAP_STABLE_WRITES:  Pages must not be modified while under writeback
 * BDI_CAP_STRICTLIMIT:    Keep number of dirty pages below bdi threshold.
 *
 * BDI_CAP_CGROUP_WRITEBACK: Supports cgroup-aware writeback.
 */
#define BDI_CAP_NO_ACCT_DIRTY	0x00000001
#define BDI_CAP_NO_WRITEBACK	0x00000002
#define BDI_CAP_NO_ACCT_WB	0x00000004
#define BDI_CAP_STABLE_WRITES	0x00000008
#define BDI_CAP_STRICTLIMIT	0x00000010
#define BDI_CAP_CGROUP_WRITEBACK	0x00000020

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
	(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)

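/*
 * Illustrative sketch (not part of this header): a RAM-backed filesystem
 * whose pages can never be written back would typically opt out of dirty
 * accounting and writeback in one go.  The bdi instance and helper below
 * are hypothetical.
 *
 *	static struct backing_dev_info example_bdi;
 *
 *	static int example_bdi_init(void)
 *	{
 *		example_bdi.capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK;
 *		return bdi_setup_and_register(&example_bdi, "example");
 *	}
 */
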
extern struct backing_dev_info noop_backing_dev_info;

int writeback_in_progress(struct backing_dev_info *bdi);

static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb;

	if (!inode)
		return &noop_backing_dev_info;

	sb = inode->i_sb;
#ifdef CONFIG_BLOCK
	if (sb_is_blkdev_sb(sb))
		return blk_get_backing_dev_info(I_BDEV(inode));
#endif
	return sb->s_bdi;
}

static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
{
	struct backing_dev_info *bdi = wb->bdi;

	if (bdi->congested_fn)
		return bdi->congested_fn(bdi->congested_data, cong_bits);
	return wb->congested->state & cong_bits;
}

long congestion_wait(int sync, long timeout);
long wait_iff_congested(struct zone *zone, int sync, long timeout);
int pdflush_proc_obsolete(struct ctl_table *table, int write,
			  void __user *buffer, size_t *lenp, loff_t *ppos);

static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi)
{
	return bdi->capabilities & BDI_CAP_STABLE_WRITES;
}

static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
}

static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}

static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
{
	/* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
	return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
				      BDI_CAP_NO_WRITEBACK));
}

static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
	return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host));
}

static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
	return bdi_cap_account_dirty(inode_to_bdi(mapping->host));
}

static inline int bdi_sched_wait(void *word)
{
	schedule();
	return 0;
}

#ifdef CONFIG_CGROUP_WRITEBACK

struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp);
void wb_congested_put(struct bdi_writeback_congested *congested);
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp);
void __inode_attach_wb(struct inode *inode, struct page *page);
void wb_memcg_offline(struct mem_cgroup *memcg);
void wb_blkcg_offline(struct blkcg *blkcg);
int inode_congested(struct inode *inode, int cong_bits);

/**
 * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
 * @inode: inode of interest
 *
 * cgroup writeback requires support from both the bdi and filesystem.
 * Test whether @inode has both.
 */
static inline bool inode_cgwb_enabled(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	return bdi_cap_account_dirty(bdi) &&
		(bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) &&
		(inode->i_sb->s_type->fs_flags & FS_CGROUP_WRITEBACK);
}

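/*
 * Illustrative sketch (not part of this header): callers on the dirtying
 * path typically branch on inode_cgwb_enabled() to decide between the
 * inode's per-cgroup wb and the bdi's root wb.  The function below is
 * hypothetical and assumes the inode's wb has already been attached.
 *
 *	static struct bdi_writeback *example_pick_wb(struct inode *inode)
 *	{
 *		if (inode_cgwb_enabled(inode))
 *			return inode_to_wb(inode);	// per-cgroup wb
 *		return &inode_to_bdi(inode)->wb;	// root wb
 *	}
 */
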
/**
 * wb_tryget - try to increment a wb's refcount
 * @wb: bdi_writeback to get
 */
static inline bool wb_tryget(struct bdi_writeback *wb)
{
	if (wb != &wb->bdi->wb)
		return percpu_ref_tryget(&wb->refcnt);
	return true;
}

/**
 * wb_get - increment a wb's refcount
 * @wb: bdi_writeback to get
 */
static inline void wb_get(struct bdi_writeback *wb)
{
	if (wb != &wb->bdi->wb)
		percpu_ref_get(&wb->refcnt);
}

/**
 * wb_put - decrement a wb's refcount
 * @wb: bdi_writeback to put
 */
static inline void wb_put(struct bdi_writeback *wb)
{
	if (wb != &wb->bdi->wb)
		percpu_ref_put(&wb->refcnt);
}

/**
 * wb_find_current - find wb for %current on a bdi
 * @bdi: bdi of interest
 *
 * Find the wb of @bdi which matches both the memcg and blkcg of %current.
 * Must be called under rcu_read_lock() which protects the returned wb.
 * Returns NULL if not found.
 */
static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	struct cgroup_subsys_state *memcg_css;
	struct bdi_writeback *wb;

	memcg_css = task_css(current, memory_cgrp_id);
	if (!memcg_css->parent)
		return &bdi->wb;

	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);

	/*
	 * %current's blkcg equals the effective blkcg of its memcg.  No
	 * need to use the relatively expensive cgroup_get_e_css().
	 */
	if (likely(wb && wb->blkcg_css == task_css(current, blkio_cgrp_id)))
		return wb;
	return NULL;
}

/**
 * wb_get_create_current - get or create wb for %current on a bdi
 * @bdi: bdi of interest
 * @gfp: allocation mask
 *
 * Equivalent to wb_get_create() on %current's memcg.  This function is
 * called from a relatively hot path and optimizes the common cases using
 * wb_find_current().
 */
static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	struct bdi_writeback *wb;

	rcu_read_lock();
	wb = wb_find_current(bdi);
	if (wb && unlikely(!wb_tryget(wb)))
		wb = NULL;
	rcu_read_unlock();

	if (unlikely(!wb)) {
		struct cgroup_subsys_state *memcg_css;

		memcg_css = task_get_css(current, memory_cgrp_id);
		wb = wb_get_create(bdi, memcg_css, gfp);
		css_put(memcg_css);
	}
	return wb;
}

/**
 * inode_attach_wb - associate an inode with its wb
 * @inode: inode of interest
 * @page: page being dirtied (may be NULL)
 *
 * If @inode doesn't have its wb, associate it with the wb matching the
 * memcg of @page or, if @page is NULL, %current.  May be called w/ or w/o
 * @inode->i_lock.
 */
static inline void inode_attach_wb(struct inode *inode, struct page *page)
{
	if (!inode->i_wb)
		__inode_attach_wb(inode, page);
}

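/*
 * Illustrative sketch (not part of this header): a page-dirtying path
 * would attach the wb before accounting the page against it.  The helper
 * below is hypothetical, in the spirit of the __set_page_dirty() paths.
 *
 *	static void example_account_dirtied(struct page *page,
 *					    struct inode *inode)
 *	{
 *		inode_attach_wb(inode, page);	// no-op if already attached
 *		inc_wb_stat(inode_to_wb(inode), WB_RECLAIMABLE);
 *	}
 */
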
/**
 * inode_detach_wb - disassociate an inode from its wb
 * @inode: inode of interest
 *
 * @inode is being freed.  Detach from its wb.
 */
static inline void inode_detach_wb(struct inode *inode)
{
	if (inode->i_wb) {
		wb_put(inode->i_wb);
		inode->i_wb = NULL;
	}
}

/**
 * inode_to_wb - determine the wb of an inode
 * @inode: inode of interest
 *
 * Returns the wb @inode is currently associated with.
 */
static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
	return inode->i_wb;
}

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline bool inode_cgwb_enabled(struct inode *inode)
{
	return false;
}

static inline struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
{
	return bdi->wb.congested;
}

static inline void wb_congested_put(struct bdi_writeback_congested *congested)
{
}

static inline bool wb_tryget(struct bdi_writeback *wb)
{
	return true;
}

static inline void wb_get(struct bdi_writeback *wb)
{
}

static inline void wb_put(struct bdi_writeback *wb)
{
}

static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	return &bdi->wb;
}

static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	return &bdi->wb;
}

static inline void inode_attach_wb(struct inode *inode, struct page *page)
{
}

static inline void inode_detach_wb(struct inode *inode)
{
}

static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
	return &inode_to_bdi(inode)->wb;
}

static inline void wb_memcg_offline(struct mem_cgroup *memcg)
{
}

static inline void wb_blkcg_offline(struct blkcg *blkcg)
{
}

static inline int inode_congested(struct inode *inode, int cong_bits)
{
	return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

static inline int inode_read_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_sync_congested);
}

static inline int inode_write_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_async_congested);
}

static inline int inode_rw_congested(struct inode *inode)
{
	return inode_congested(inode, (1 << WB_sync_congested) |
				      (1 << WB_async_congested));
}

static inline int bdi_congested(struct backing_dev_info *bdi, int cong_bits)
{
	return wb_congested(&bdi->wb, cong_bits);
}

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_sync_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_async_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, (1 << WB_sync_congested) |
				  (1 << WB_async_congested));
}
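
/*
 * Illustrative sketch (not part of this header): submitters that can
 * defer work typically poll one of the congestion helpers above and back
 * off with congestion_wait(), as many callers throughout the kernel do:
 *
 *	while (bdi_write_congested(bdi))
 *		congestion_wait(BLK_RW_ASYNC, HZ / 50);
 *	// submit asynchronous write-out here
 */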

#endif	/* _LINUX_BACKING_DEV_H */