/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/blk-cgroup.h>
#include <linux/backing-dev-defs.h>

int __must_check bdi_init(struct backing_dev_info *bdi);
void bdi_destroy(struct backing_dev_info *bdi);

__printf(3, 4)
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
void bdi_unregister(struct backing_dev_info *bdi);
int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
			bool range_cyclic, enum wb_reason reason);
void wb_start_background_writeback(struct bdi_writeback *wb);
void wb_workfn(struct work_struct *work);
void wb_wakeup_delayed(struct bdi_writeback *wb);

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;

extern struct workqueue_struct *bdi_wq;

static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
{
	return test_bit(WB_has_dirty_io, &wb->state);
}

static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
{
	/*
	 * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are
	 * any dirty wbs.  See wb_update_write_bandwidth().
	 */
	return atomic_long_read(&bdi->tot_write_bandwidth);
}

static inline void __add_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item, s64 amount)
{
	__percpu_counter_add(&wb->stat[item], amount, WB_STAT_BATCH);
}

static inline void __inc_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item)
{
	__add_wb_stat(wb, item, 1);
}

static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_wb_stat(wb, item);
	local_irq_restore(flags);
}

static inline void __dec_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item)
{
	__add_wb_stat(wb, item, -1);
}

static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_wb_stat(wb, item);
	local_irq_restore(flags);
}

static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_read_positive(&wb->stat[item]);
}

static inline s64 __wb_stat_sum(struct bdi_writeback *wb,
				enum wb_stat_item item)
{
	return percpu_counter_sum_positive(&wb->stat[item]);
}

static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
{
	s64 sum;
	unsigned long flags;

	local_irq_save(flags);
	sum = __wb_stat_sum(wb, item);
	local_irq_restore(flags);

	return sum;
}

extern void wb_writeout_inc(struct bdi_writeback *wb);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long wb_stat_error(struct bdi_writeback *wb)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * WB_STAT_BATCH;
#else
	return 1;
#endif
}
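
/*
 * Illustrative sketch (not part of this header's API): how a writeback
 * completion path might use the stat helpers above.  The helper name is
 * made up; WB_WRITEBACK and WB_WRITTEN are real wb_stat_item values from
 * backing-dev-defs.h.
 *
 *	static void sample_end_page_writeback(struct bdi_writeback *wb)
 *	{
 *		dec_wb_stat(wb, WB_WRITEBACK);	// page left writeback
 *		inc_wb_stat(wb, WB_WRITTEN);	// one more page written out
 *	}
 *
 * Readers should use wb_stat() for a cheap, possibly stale value and
 * wb_stat_sum() when accuracy within wb_stat_error() is required.
 */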

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
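
/*
 * Illustrative sketch (assumption, not from this file): capping a slow
 * device to at most 5% of the global dirty threshold while always
 * granting it at least 1%:
 *
 *	err = bdi_set_min_ratio(bdi, 1);
 *	if (!err)
 *		err = bdi_set_max_ratio(bdi, 5);
 */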

/*
 * Flags in backing_dev_info::capability
 *
 * The first three flags control whether dirty pages will contribute to the
 * VM's accounting and whether writepages() should be called for dirty pages
 * (something that would not, for example, be appropriate for ramfs).
 *
 * WARNING: these flags are closely related and should not normally be
 * used separately.  BDI_CAP_NO_ACCT_AND_WRITEBACK combines the three
 * into a single convenience macro.
 *
 * BDI_CAP_NO_ACCT_DIRTY:  Dirty pages shouldn't contribute to accounting
 * BDI_CAP_NO_WRITEBACK:   Don't write pages back
 * BDI_CAP_NO_ACCT_WB:     Don't automatically account writeback pages
 * BDI_CAP_STABLE_WRITES:  Pages under writeback must be kept stable (not
 *			   modified) until writeback completes
 * BDI_CAP_STRICTLIMIT:    Keep number of dirty pages below bdi threshold
 *
 * BDI_CAP_CGROUP_WRITEBACK: Supports cgroup-aware writeback.
 */
#define BDI_CAP_NO_ACCT_DIRTY	0x00000001
#define BDI_CAP_NO_WRITEBACK	0x00000002
#define BDI_CAP_NO_ACCT_WB	0x00000004
#define BDI_CAP_STABLE_WRITES	0x00000008
#define BDI_CAP_STRICTLIMIT	0x00000010
#define BDI_CAP_CGROUP_WRITEBACK	0x00000020

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
	(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)
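
/*
 * Illustrative sketch (assumption, not from this file): a ramfs-like
 * filesystem whose pages can never be written back would suppress both
 * dirty accounting and writeback on its bdi, while a device that
 * checksums in-flight pages would ask for stable writes instead:
 *
 *	bdi->capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK;
 *	// or, for a checksumming block device:
 *	bdi->capabilities |= BDI_CAP_STABLE_WRITES;
 */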

extern struct backing_dev_info noop_backing_dev_info;

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @wb: bdi_writeback of interest
 *
 * Determine whether there is writeback waiting to be handled against a
 * bdi_writeback.
 */
static inline bool writeback_in_progress(struct bdi_writeback *wb)
{
	return test_bit(WB_writeback_running, &wb->state);
}

static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb;

	if (!inode)
		return &noop_backing_dev_info;

	sb = inode->i_sb;
#ifdef CONFIG_BLOCK
	if (sb_is_blkdev_sb(sb))
		return blk_get_backing_dev_info(I_BDEV(inode));
#endif
	return sb->s_bdi;
}

static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
{
	struct backing_dev_info *bdi = wb->bdi;

	if (bdi->congested_fn)
		return bdi->congested_fn(bdi->congested_data, cong_bits);
	return wb->congested->state & cong_bits;
}

long congestion_wait(int sync, long timeout);
long wait_iff_congested(struct zone *zone, int sync, long timeout);
int pdflush_proc_obsolete(struct ctl_table *table, int write,
			  void __user *buffer, size_t *lenp, loff_t *ppos);

static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi)
{
	return bdi->capabilities & BDI_CAP_STABLE_WRITES;
}

static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
}

static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}

static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
{
	/* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
	return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
				      BDI_CAP_NO_WRITEBACK));
}

static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
	return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host));
}

static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
	return bdi_cap_account_dirty(inode_to_bdi(mapping->host));
}

static inline int bdi_sched_wait(void *word)
{
	schedule();
	return 0;
}

#ifdef CONFIG_CGROUP_WRITEBACK

struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp);
void wb_congested_put(struct bdi_writeback_congested *congested);
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp);
void wb_memcg_offline(struct mem_cgroup *memcg);
void wb_blkcg_offline(struct blkcg *blkcg);
int inode_congested(struct inode *inode, int cong_bits);

/**
 * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
 * @inode: inode of interest
 *
 * cgroup writeback requires support from both the bdi and filesystem.
 * Test whether @inode has both.
 */
static inline bool inode_cgwb_enabled(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	return bdi_cap_account_dirty(bdi) &&
		(bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) &&
		(inode->i_sb->s_type->fs_flags & FS_CGROUP_WRITEBACK);
}

/**
 * wb_find_current - find wb for %current on a bdi
 * @bdi: bdi of interest
 *
 * Find the wb of @bdi which matches both the memcg and blkcg of %current.
 * Must be called under rcu_read_lock() which protects the returned wb.
 * Returns NULL if no matching wb is found.
 */
static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	struct cgroup_subsys_state *memcg_css;
	struct bdi_writeback *wb;

	memcg_css = task_css(current, memory_cgrp_id);
	if (!memcg_css->parent)
		return &bdi->wb;

	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);

	/*
	 * %current's blkcg equals the effective blkcg of its memcg.  No
	 * need to use the relatively expensive cgroup_get_e_css().
	 */
	if (likely(wb && wb->blkcg_css == task_css(current, blkio_cgrp_id)))
		return wb;
	return NULL;
}

/**
 * wb_get_create_current - get or create wb for %current on a bdi
 * @bdi: bdi of interest
 * @gfp: allocation mask
 *
 * Equivalent to wb_get_create() on %current's memcg.  This function is
 * called from a relatively hot path and optimizes the common cases using
 * wb_find_current().
 */
static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	struct bdi_writeback *wb;

	rcu_read_lock();
	wb = wb_find_current(bdi);
	if (wb && unlikely(!wb_tryget(wb)))
		wb = NULL;
	rcu_read_unlock();

	if (unlikely(!wb)) {
		struct cgroup_subsys_state *memcg_css;

		memcg_css = task_get_css(current, memory_cgrp_id);
		wb = wb_get_create(bdi, memcg_css, gfp);
		css_put(memcg_css);
	}
	return wb;
}
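
/*
 * Illustrative sketch (assumption, not a real call site): a dirtying
 * path acquiring and releasing a wb reference.  wb_put() pairs with the
 * reference taken above and comes from backing-dev-defs.h.
 *
 *	struct bdi_writeback *wb;
 *
 *	wb = wb_get_create_current(bdi, GFP_ATOMIC);
 *	if (wb) {
 *		// ... account the newly dirtied page against wb ...
 *		wb_put(wb);
 *	}
 */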

/**
 * inode_to_wb - determine the wb of an inode
 * @inode: inode of interest
 *
 * Returns the wb @inode is currently associated with.
 */
static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
	return inode->i_wb;
}

/**
 * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
 * @inode: target inode
 * @lockedp: temp bool output param, to be passed to the end function
 *
 * The caller wants to access the wb associated with @inode but isn't
 * holding inode->i_lock, mapping->tree_lock or wb->list_lock.  This
 * function determines the wb associated with @inode and ensures that the
 * association doesn't change until the transaction is finished with
 * unlocked_inode_to_wb_end().
 *
 * The caller must call unlocked_inode_to_wb_end() with *@lockedp
 * afterwards and must not sleep during the transaction.  IRQs may or
 * may not be disabled on return.
 */
static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
{
	rcu_read_lock();

	/*
	 * Paired with store_release in inode_switch_wb_work_fn() and
	 * ensures that we see the new wb if we see cleared I_WB_SWITCH.
	 */
	*lockedp = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;

	if (unlikely(*lockedp))
		spin_lock_irq(&inode->i_mapping->tree_lock);
	return inode_to_wb(inode);
}

/**
 * unlocked_inode_to_wb_end - end inode wb access transaction
 * @inode: target inode
 * @locked: *@lockedp from unlocked_inode_to_wb_begin()
 */
static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
{
	if (unlikely(locked))
		spin_unlock_irq(&inode->i_mapping->tree_lock);

	rcu_read_unlock();
}
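
/*
 * Illustrative sketch (assumption, modeled on the description above):
 * bumping a per-wb stat without holding any of the locks that pin the
 * inode<->wb association.
 *
 *	struct bdi_writeback *wb;
 *	bool locked;
 *
 *	wb = unlocked_inode_to_wb_begin(inode, &locked);
 *	inc_wb_stat(wb, WB_DIRTIED);	// association is stable here
 *	unlocked_inode_to_wb_end(inode, locked);
 */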

struct wb_iter {
	int			start_blkcg_id;
	struct radix_tree_iter	tree_iter;
	void			**slot;
};

static inline struct bdi_writeback *__wb_iter_next(struct wb_iter *iter,
						   struct backing_dev_info *bdi)
{
	struct radix_tree_iter *titer = &iter->tree_iter;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (iter->start_blkcg_id >= 0) {
		iter->slot = radix_tree_iter_init(titer, iter->start_blkcg_id);
		iter->start_blkcg_id = -1;
	} else {
		iter->slot = radix_tree_next_slot(iter->slot, titer, 0);
	}

	if (!iter->slot)
		iter->slot = radix_tree_next_chunk(&bdi->cgwb_tree, titer, 0);
	if (iter->slot)
		return *iter->slot;
	return NULL;
}

static inline struct bdi_writeback *__wb_iter_init(struct wb_iter *iter,
						   struct backing_dev_info *bdi,
						   int start_blkcg_id)
{
	iter->start_blkcg_id = start_blkcg_id;

	if (start_blkcg_id)
		return __wb_iter_next(iter, bdi);
	else
		return &bdi->wb;
}

/**
 * bdi_for_each_wb - walk all wb's of a bdi in ascending blkcg ID order
 * @wb_cur: cursor struct bdi_writeback pointer
 * @bdi: bdi to walk wb's of
 * @iter: pointer to struct wb_iter to be used as iteration buffer
 * @start_blkcg_id: blkcg ID to start iteration from
 *
 * Iterate @wb_cur through the wb's (bdi_writeback's) of @bdi in ascending
 * blkcg ID order starting from @start_blkcg_id.  @iter is struct wb_iter
 * to be used as temp storage during iteration.  rcu_read_lock() must be
 * held throughout iteration.
 */
#define bdi_for_each_wb(wb_cur, bdi, iter, start_blkcg_id)		\
	for ((wb_cur) = __wb_iter_init(iter, bdi, start_blkcg_id);	\
	     (wb_cur); (wb_cur) = __wb_iter_next(iter, bdi))
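
/*
 * Illustrative sketch (assumption): summing one stat across every wb of
 * a bdi using the iterator above.
 *
 *	struct bdi_writeback *wb;
 *	struct wb_iter iter;
 *	s64 writeback = 0;
 *
 *	rcu_read_lock();
 *	bdi_for_each_wb(wb, bdi, &iter, 0)
 *		writeback += wb_stat(wb, WB_WRITEBACK);
 *	rcu_read_unlock();
 */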

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline bool inode_cgwb_enabled(struct inode *inode)
{
	return false;
}

static inline struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
{
	return bdi->wb.congested;
}

static inline void wb_congested_put(struct bdi_writeback_congested *congested)
{
}

static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	return &bdi->wb;
}

static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	return &bdi->wb;
}

static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
	return &inode_to_bdi(inode)->wb;
}

static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
{
	return inode_to_wb(inode);
}

static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
{
}

static inline void wb_memcg_offline(struct mem_cgroup *memcg)
{
}

static inline void wb_blkcg_offline(struct blkcg *blkcg)
{
}

struct wb_iter {
	int		next_id;
};

#define bdi_for_each_wb(wb_cur, bdi, iter, start_blkcg_id)		\
	for ((iter)->next_id = (start_blkcg_id);			\
	     ({ (wb_cur) = !(iter)->next_id++ ? &(bdi)->wb : NULL; }); )

static inline int inode_congested(struct inode *inode, int cong_bits)
{
	return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

static inline int inode_read_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_sync_congested);
}

static inline int inode_write_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_async_congested);
}

static inline int inode_rw_congested(struct inode *inode)
{
	return inode_congested(inode, (1 << WB_sync_congested) |
				      (1 << WB_async_congested));
}

static inline int bdi_congested(struct backing_dev_info *bdi, int cong_bits)
{
	return wb_congested(&bdi->wb, cong_bits);
}

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_sync_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_async_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, (1 << WB_sync_congested) |
				  (1 << WB_async_congested));
}
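
/*
 * Illustrative sketch (assumption): a writer backing off while the
 * underlying device is congested.  BLK_RW_ASYNC comes from
 * backing-dev-defs.h and congestion_wait() is declared above.
 *
 *	while (bdi_write_congested(bdi))
 *		congestion_wait(BLK_RW_ASYNC, HZ / 10);
 */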

#endif	/* _LINUX_BACKING_DEV_H */