#ifndef _LINUX_SHRINKER_H
#define _LINUX_SHRINKER_H

/*
 * This struct is used to pass information from page reclaim to the shrinkers.
 * We consolidate the values for easier extension later.
 *
 * The 'gfp_mask' refers to the allocation we are currently trying to
 * fulfil.
 */
struct shrink_control {
	gfp_t gfp_mask;

	/*
	 * How many objects scan_objects should scan and try to reclaim.
	 * This is reset before every call, so it is safe for callees
	 * to modify.
	 */
	unsigned long nr_to_scan;

	/*
	 * How many objects did scan_objects process?
	 * This defaults to nr_to_scan before every call, but the callee
	 * should track its actual progress.
	 */
	unsigned long nr_scanned;

	/* current node being shrunk (for NUMA aware shrinkers) */
	int nid;

	/* current memcg being shrunk (for memcg aware shrinkers) */
	struct mem_cgroup *memcg;
};

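/*
 * Illustrative sketch (not part of this header): a scan_objects()
 * implementation typically walks at most sc->nr_to_scan entries and
 * records how many it actually examined in sc->nr_scanned.  The
 * my_cache_lru list and my_cache_evict_one() helper below are
 * hypothetical, invented only to show how the fields are consumed:
 *
 *	unsigned long freed = 0;
 *
 *	sc->nr_scanned = 0;
 *	while (sc->nr_scanned < sc->nr_to_scan &&
 *	       !list_empty(&my_cache_lru)) {
 *		if (my_cache_evict_one(&my_cache_lru, sc->gfp_mask))
 *			freed++;
 *		sc->nr_scanned++;
 *	}
 *	return freed;
 */
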
#define SHRINK_STOP (~0UL)
/*
 * A callback you can register to apply pressure to ageable caches.
 *
 * @count_objects should return the number of freeable items in the cache. If
 * there are no objects to free or the number of freeable items cannot be
 * determined, it should return 0. No deadlock checks should be done during the
 * count callback - scan counts that could not be executed due to potential
 * deadlocks are aggregated by the shrinker and run at a later call, once the
 * deadlock condition is no longer pending.
 *
 * @scan_objects will only be called if @count_objects returned a non-zero
 * value for the number of freeable objects. The callout should scan the cache
 * and attempt to free items from the cache. It should then return the number
 * of objects freed during the scan, or SHRINK_STOP if progress cannot be made
 * due to potential deadlocks. If SHRINK_STOP is returned, then no further
 * attempts to call @scan_objects will be made from the current reclaim
 * context.
 *
 * @flags determine the shrinker's abilities, like NUMA awareness.
 */
56struct shrinker {
Dave Chinner24f7c6b2013-08-28 10:17:56 +100057 unsigned long (*count_objects)(struct shrinker *,
58 struct shrink_control *sc);
59 unsigned long (*scan_objects)(struct shrinker *,
60 struct shrink_control *sc);
61
Dave Chinnerb0d40c92011-07-08 14:14:42 +100062 int seeks; /* seeks to recreate an obj */
63 long batch; /* reclaim batch size, 0 = default */
Glauber Costa1d3d4432013-08-28 10:18:04 +100064 unsigned long flags;
Dave Chinnerb0d40c92011-07-08 14:14:42 +100065
66 /* These are for internal use */
67 struct list_head list;
Glauber Costa1d3d4432013-08-28 10:18:04 +100068 /* objs pending delete, per node */
69 atomic_long_t *nr_deferred;
Dave Chinnerb0d40c92011-07-08 14:14:42 +100070};
#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */

/* Flags */
#define SHRINKER_NUMA_AWARE	(1 << 0)
#define SHRINKER_MEMCG_AWARE	(1 << 1)

extern int register_shrinker(struct shrinker *);
extern void unregister_shrinker(struct shrinker *);
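
/*
 * Usage sketch (illustrative only, not part of this header): a driver
 * with a hypothetical "foo" object cache might wire up a shrinker like
 * this.  foo_nr_cached() and foo_evict_one() are invented helpers; real
 * users supply their own cache, locking and error handling.
 *
 *	static unsigned long foo_count(struct shrinker *shrink,
 *				       struct shrink_control *sc)
 *	{
 *		return foo_nr_cached();		// 0 if nothing is freeable
 *	}
 *
 *	static unsigned long foo_scan(struct shrinker *shrink,
 *				      struct shrink_control *sc)
 *	{
 *		unsigned long freed = 0;
 *
 *		while (freed < sc->nr_to_scan && foo_evict_one())
 *			freed++;
 *		return freed;			// or SHRINK_STOP on deadlock risk
 *	}
 *
 *	static struct shrinker foo_shrinker = {
 *		.count_objects	= foo_count,
 *		.scan_objects	= foo_scan,
 *		.seeks		= DEFAULT_SEEKS,
 *	};
 *
 *	register_shrinker(&foo_shrinker);	// at module/driver init
 *	unregister_shrinker(&foo_shrinker);	// before the cache goes away
 */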
#endif