#ifndef __LINUX_BACKING_DEV_DEFS_H
#define __LINUX_BACKING_DEV_DEFS_H

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/percpu_counter.h>
#include <linux/flex_proportions.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

struct page;
struct device;
struct dentry;

/*
 * Bits in bdi_writeback.state
 */
enum wb_state {
	WB_async_congested,	/* The async (write) queue is getting full */
	WB_sync_congested,	/* The sync queue is getting full */
	WB_registered,		/* bdi_register() was done */
	WB_writeback_running,	/* Writeback is in progress */
};

typedef int (congested_fn)(void *, int);

enum wb_stat_item {
	WB_RECLAIMABLE,
	WB_WRITEBACK,
	WB_DIRTIED,
	WB_WRITTEN,
	NR_WB_STAT_ITEMS
};

#define WB_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
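
/*
 * Illustrative note, not part of the original header: WB_STAT_BATCH is the
 * per-cpu batch handed to the percpu_counter helpers whenever the stat[]
 * counters in struct bdi_writeback below are updated, roughly
 *
 *	__percpu_counter_add(&wb->stat[WB_RECLAIMABLE], 1, WB_STAT_BATCH);
 *
 * so larger machines (bigger nr_cpu_ids) tolerate more per-cpu drift before
 * the shared counter has to be touched.
 */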

struct bdi_writeback {
	struct backing_dev_info *bdi;	/* our parent bdi */

	unsigned long state;		/* Always use atomic bitops on this */
	unsigned long last_old_flush;	/* last old data flush */

	struct list_head b_dirty;	/* dirty inodes */
	struct list_head b_io;		/* parked for writeback */
	struct list_head b_more_io;	/* parked for more writeback */
	struct list_head b_dirty_time;	/* time stamps are dirty */
	spinlock_t list_lock;		/* protects the b_* lists */

	struct percpu_counter stat[NR_WB_STAT_ITEMS];

	unsigned long bw_time_stamp;	/* last time write bw is updated */
	unsigned long dirtied_stamp;
	unsigned long written_stamp;	/* pages written at bw_time_stamp */
	unsigned long write_bandwidth;	/* the estimated write bandwidth */
	unsigned long avg_write_bandwidth; /* further smoothed write bw */

	/*
	 * The base dirty throttle rate, recalculated every 200ms.
	 * All the bdi tasks' dirty rates will be curbed under it.
	 * @dirty_ratelimit tracks the estimated @balanced_dirty_ratelimit
	 * in small steps and is much smoother and more stable than the latter.
	 */
	unsigned long dirty_ratelimit;
	unsigned long balanced_dirty_ratelimit;

	struct fprop_local_percpu completions;
	int dirty_exceeded;

	spinlock_t work_lock;		/* protects work_list & dwork scheduling */
	struct list_head work_list;
	struct delayed_work dwork;	/* work item used for writeback */
};
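
/*
 * Illustrative sketch, not part of the original header: ->state is a plain
 * unsigned long, so the WB_* bits from enum wb_state are expected to be
 * manipulated with the atomic bitops, e.g.
 *
 *	set_bit(WB_registered, &wb->state);
 *	if (test_bit(WB_writeback_running, &wb->state))
 *		return;
 *	clear_bit(WB_sync_congested, &wb->state);
 */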

struct backing_dev_info {
	struct list_head bdi_list;
	unsigned long ra_pages;		/* max readahead in PAGE_CACHE_SIZE units */
	unsigned int capabilities;	/* Device capabilities */
	congested_fn *congested_fn;	/* Function pointer if device is md/dm */
	void *congested_data;		/* Pointer to aux data for congested func */

	char *name;

	unsigned int min_ratio;
	unsigned int max_ratio, max_prop_frac;

	struct bdi_writeback wb;	/* default writeback info for this bdi */

	struct device *dev;

	struct timer_list laptop_mode_wb_timer;

#ifdef CONFIG_DEBUG_FS
	struct dentry *debug_dir;
	struct dentry *debug_stats;
#endif
};

enum {
	BLK_RW_ASYNC	= 0,
	BLK_RW_SYNC	= 1,
};

void clear_bdi_congested(struct backing_dev_info *bdi, int sync);
void set_bdi_congested(struct backing_dev_info *bdi, int sync);
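
/*
 * Usage sketch, not part of the original header: @sync is one of the
 * BLK_RW_* values above, and stacked (md/dm) devices report congestion
 * through ->congested_fn() instead of the WB_*_congested bits, roughly
 *
 *	set_bdi_congested(bdi, BLK_RW_SYNC);
 *	...
 *	if (bdi->congested_fn)
 *		congested = bdi->congested_fn(bdi->congested_data, bdi_bits);
 *	clear_bdi_congested(bdi, BLK_RW_SYNC);
 */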

#endif	/* __LINUX_BACKING_DEV_DEFS_H */