/* SPDX-License-Identifier: GPL-2.0 */
/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */
#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1


#include <linux/mmzone.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <uapi/linux/mempolicy.h>

struct mm_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_sem.
 *
 * Freeing policy:
 * Mempolicy objects are reference counted. A mempolicy will be freed when
 * mpol_put() decrements the reference count to zero.
 *
 * Duplicating policy objects:
 * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
 * to the new storage. The reference count of the new object is initialized
 * to 1, representing the caller of mpol_dup().
 */
struct mempolicy {
	atomic_t refcnt;
	unsigned short mode;	/* See MPOL_* above */
	unsigned short flags;	/* See set_mempolicy() MPOL_F_* above */
	union {
		short		 preferred_node; /* preferred */
		nodemask_t	 nodes;		/* interleave/bind */
		/* undefined for default */
	} v;
	union {
		nodemask_t cpuset_mems_allowed;	/* relative to these nodes */
		nodemask_t user_nodemask;	/* nodemask passed by user */
	} w;
};
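
/*
 * Illustrative sketch (not part of the kernel API contract): which member
 * of the 'v' union is meaningful depends on 'mode'.  A reader of the
 * structure would typically do something like:
 *
 *	switch (pol->mode) {
 *	case MPOL_PREFERRED:
 *		// v.preferred_node is the preferred node id
 *		break;
 *	case MPOL_INTERLEAVE:
 *	case MPOL_BIND:
 *		// v.nodes is the interleave/bind nodemask
 *		break;
 *	case MPOL_DEFAULT:
 *		// v is undefined; fall back to the enclosing policy
 *		break;
 *	}
 */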

/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_put(struct mempolicy *pol);
static inline void mpol_put(struct mempolicy *pol)
{
	if (pol)
		__mpol_put(pol);
}

/*
 * Does mempolicy pol need explicit unref after use?
 * Currently only needed for shared policies.
 */
static inline int mpol_needs_cond_ref(struct mempolicy *pol)
{
	return (pol && (pol->flags & MPOL_F_SHARED));
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
	if (mpol_needs_cond_ref(pol))
		__mpol_put(pol);
}
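
/*
 * Illustrative usage sketch: a policy obtained from a lookup helper such
 * as __get_vma_policy() (declared later in this header) may be a shared
 * policy carrying an extra reference, which is dropped conditionally:
 *
 *	struct mempolicy *pol = __get_vma_policy(vma, addr);
 *	... use pol to pick a node for an allocation ...
 *	mpol_cond_put(pol);	// drops the ref only if MPOL_F_SHARED is set
 */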

extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
{
	if (pol)
		pol = __mpol_dup(pol);
	return pol;
}
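
/*
 * Illustrative sketch: duplicating a policy yields a private copy with a
 * reference count of 1 that the caller owns and must eventually release:
 *
 *	struct mempolicy *new = mpol_dup(old);
 *	if (IS_ERR(new))
 *		return PTR_ERR(new);
 *	... install or use new ...
 *	mpol_put(new);		// drop the caller's reference
 */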

#define vma_policy(vma) ((vma)->vm_policy)

static inline void mpol_get(struct mempolicy *pol)
{
	if (pol)
		atomic_inc(&pol->refcnt);
}

extern bool __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (a == b)
		return true;
	return __mpol_equal(a, b);
}

/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in pages, not
 * bytes, so that we can work with shared memory segments bigger than
 * unsigned long.
 */

struct sp_node {
	struct rb_node nd;
	unsigned long start, end;
	struct mempolicy *policy;
};

struct shared_policy {
	struct rb_root root;
	rwlock_t lock;
};

int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst);
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
int mpol_set_shared_policy(struct shared_policy *info,
				struct vm_area_struct *vma,
				struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
					    unsigned long idx);
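
/*
 * Typical flow, sketched after tmpfs/shmem (the main user of shared
 * policies, with 'info' standing for its per-inode data): an inode embeds
 * a struct shared_policy, ranges of page indices get policies via
 * mpol_set_shared_policy(), and allocations look up the policy for the
 * faulting index:
 *
 *	mpol_shared_policy_init(&info->policy, mpol);		// inode setup
 *	...
 *	pol = mpol_shared_policy_lookup(&info->policy, index);	// per page
 *	...
 *	mpol_free_shared_policy(&info->policy);			// inode teardown
 */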

struct mempolicy *get_task_policy(struct task_struct *p);
struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
		unsigned long addr);
bool vma_policy_mof(struct vm_area_struct *vma);

extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);

extern int huge_node(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask);
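
/*
 * Sketch of the intended calling pattern (modelled on the hugetlb page
 * allocator): huge_node() picks the node to start the search from and
 * hands back the policy and an optional nodemask; the policy reference
 * is dropped with mpol_cond_put() once the allocation is done:
 *
 *	nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
 *	page = ...allocate a huge page starting from nid/nodemask...;
 *	mpol_cond_put(mpol);
 */
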
extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
				const nodemask_t *mask);
extern unsigned int mempolicy_slab_node(void);

extern enum zone_type policy_zone;

static inline void check_highest_zone(enum zone_type k)
{
	if (k > policy_zone && k != ZONE_MOVABLE)
		policy_zone = k;
}

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags);


#ifdef CONFIG_TMPFS
extern int mpol_parse_str(char *str, struct mempolicy **mpol);
#endif

extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);

/* Check if a vma is migratable */
static inline bool vma_migratable(struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
		return false;

	/*
	 * DAX device mappings require predictable access latency, so avoid
	 * incurring periodic faults.
	 */
	if (vma_is_dax(vma))
		return false;

#ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
	if (vma->vm_flags & VM_HUGETLB)
		return false;
#endif

	/*
	 * Migration allocates pages in the highest zone. If we cannot
	 * do so then migration (at least from node to node) is not
	 * possible.
	 */
	if (vma->vm_file &&
		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
								< policy_zone)
		return false;
	return true;
}
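
/*
 * Sketch only: page-walk callers such as the mbind()/migrate_pages()
 * paths are expected to skip mappings for which this returns false:
 *
 *	if (!vma_migratable(vma))
 *		continue;	// leave this mapping where it is
 */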

extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
extern void mpol_put_task_policy(struct task_struct *);

#else

struct mempolicy {};

static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return true;
}

static inline void mpol_put(struct mempolicy *p)
{
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
}

static inline void mpol_get(struct mempolicy *pol)
{
}

struct shared_policy {};

static inline void mpol_shared_policy_init(struct shared_policy *sp,
						struct mempolicy *mpol)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	return NULL;
}

#define vma_policy(vma) NULL

static inline int
vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
{
	return 0;
}

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
				const nodemask_t *new)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

static inline int huge_node(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask)
{
	*mpol = NULL;
	*nodemask = NULL;
	return 0;
}

static inline bool init_nodemask_of_mempolicy(nodemask_t *m)
{
	return false;
}

static inline int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
				   const nodemask_t *to, int flags)
{
	return 0;
}

static inline void check_highest_zone(int k)
{
}

#ifdef CONFIG_TMPFS
static inline int mpol_parse_str(char *str, struct mempolicy **mpol)
{
	return 1;	/* error */
}
#endif

static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
				 unsigned long address)
{
	return -1; /* no node preference */
}

static inline void mpol_put_task_policy(struct task_struct *task)
{
}
#endif /* CONFIG_NUMA */
#endif