/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMU_NOTIFIER_H
#define _LINUX_MMU_NOTIFIER_H

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <linux/srcu.h>

struct mmu_notifier;
struct mmu_notifier_ops;

#ifdef CONFIG_MMU_NOTIFIER

/*
 * The mmu notifier_mm structure is allocated and installed in
 * mm->mmu_notifier_mm inside the mm_take_all_locks() protected
 * critical section and it's released only when mm_count reaches zero
 * in mmdrop().
 */
struct mmu_notifier_mm {
	/* all mmu notifiers registered in this mm are queued in this list */
	struct hlist_head list;
	/* to serialize the list modifications and hlist_unhashed */
	spinlock_t lock;
};

struct mmu_notifier_range {
	struct mm_struct *mm;
	unsigned long start;
	unsigned long end;
	bool blockable;
};

struct mmu_notifier_ops {
	/*
	 * Called either by mmu_notifier_unregister or when the mm is
	 * being destroyed by exit_mmap, always before all pages are
	 * freed. This can run concurrently with other mmu notifier
	 * methods (the ones invoked outside the mm context) and it
	 * should tear down all secondary mmu mappings and freeze the
	 * secondary mmu. If this method isn't implemented you have to
	 * be sure that nothing could possibly write to the pages
	 * through the secondary mmu by the time the last thread with
	 * tsk->mm == mm exits.
	 *
	 * As a side note: the pages freed after ->release returns could
	 * be immediately reallocated by the gart at an alias physical
	 * address with a different cache model, so if ->release isn't
	 * implemented because all _software_ driven memory accesses
	 * through the secondary mmu are terminated by the time the
	 * last thread of this mm quits, you also have to be sure that
	 * speculative _hardware_ operations can't allocate dirty
	 * cachelines in the cpu that could not be snooped and made
	 * coherent with the other read and write operations happening
	 * through the gart alias address, thus leading to memory
	 * corruption.
	 */
	void (*release)(struct mmu_notifier *mn,
			struct mm_struct *mm);

	/*
	 * clear_flush_young is called when the VM is
	 * test-and-clearing the young/accessed bitflag in the
	 * pte. This way the VM will provide proper aging to the
	 * accesses to the page through the secondary MMUs and not
	 * only to the ones through the Linux pte.
	 * Start-end is necessary in case the secondary MMU is mapping the page
	 * at a smaller granularity than the primary MMU.
	 */
	int (*clear_flush_young)(struct mmu_notifier *mn,
				 struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end);

	/*
	 * clear_young is a lightweight version of clear_flush_young. Like the
	 * latter, it is supposed to test-and-clear the young/accessed bitflag
	 * in the secondary pte, but it may omit flushing the secondary tlb.
	 */
	int (*clear_young)(struct mmu_notifier *mn,
			   struct mm_struct *mm,
			   unsigned long start,
			   unsigned long end);

	/*
	 * test_young is called to check the young/accessed bitflag in
	 * the secondary pte. This is used to know if the page is
	 * frequently used without actually clearing the flag or tearing
	 * down the secondary mapping on the page.
	 */
	int (*test_young)(struct mmu_notifier *mn,
			  struct mm_struct *mm,
			  unsigned long address);

	/*
	 * change_pte is called in cases where the pte mapping to a page is
	 * changed: for example, when ksm remaps the pte to point to a new
	 * shared page.
	 */
	void (*change_pte)(struct mmu_notifier *mn,
			   struct mm_struct *mm,
			   unsigned long address,
			   pte_t pte);

	/*
	 * invalidate_range_start() and invalidate_range_end() must be
	 * paired and are called only when the mmap_sem and/or the
	 * locks protecting the reverse maps are held. If the subsystem
	 * can't guarantee that no additional references are taken to
	 * the pages in the range, it has to implement the
	 * invalidate_range() notifier to remove any references taken
	 * after invalidate_range_start().
	 *
	 * Invalidation of multiple concurrent ranges may be
	 * optionally permitted by the driver. Either way the
	 * establishment of sptes is forbidden in the range passed to
	 * invalidate_range_start/end for the whole duration of the
	 * invalidate_range_start/end critical section.
	 *
	 * invalidate_range_start() is called when all pages in the
	 * range are still mapped and have at least a refcount of one.
	 *
	 * invalidate_range_end() is called when all pages in the
	 * range have been unmapped and the pages have been freed by
	 * the VM.
	 *
	 * The VM will remove the page table entries and potentially
	 * the page between invalidate_range_start() and
	 * invalidate_range_end(). If the page must not be freed
	 * because of pending I/O or other circumstances then the
	 * invalidate_range_start() callback (or the initial mapping
	 * by the driver) must make sure that the refcount is kept
	 * elevated.
	 *
	 * If the driver increases the refcount when the pages are
	 * initially mapped into an address space then either
	 * invalidate_range_start() or invalidate_range_end() may
	 * decrease the refcount. If the refcount is decreased on
	 * invalidate_range_start() then the VM can free pages as page
	 * table entries are removed. If the refcount is only
	 * dropped on invalidate_range_end() then the driver itself
	 * will drop the last refcount but it must take care to flush
	 * any secondary tlb before doing the final free on the
	 * page. Pages will no longer be referenced by the linux
	 * address space but may still be referenced by sptes until
	 * the last refcount is dropped.
	 *
	 * If the blockable argument is set to false then the callback cannot
	 * sleep and has to return -EAGAIN; 0 should be returned
	 * otherwise. Please note that if invalidate_range_start approves
	 * a non-blocking behavior then the same applies to
	 * invalidate_range_end.
	 *
	 */
	int (*invalidate_range_start)(struct mmu_notifier *mn,
				      const struct mmu_notifier_range *range);
	void (*invalidate_range_end)(struct mmu_notifier *mn,
				     const struct mmu_notifier_range *range);

	/*
	 * invalidate_range() is either called between
	 * invalidate_range_start() and invalidate_range_end() when the
	 * VM has to free pages that were unmapped, but before the
	 * pages are actually freed, or outside of _start()/_end() when
	 * a (remote) TLB flush is necessary.
	 *
	 * If invalidate_range() is used to manage a non-CPU TLB with
	 * shared page-tables, it is not necessary to implement the
	 * invalidate_range_start()/end() notifiers, as
	 * invalidate_range() already catches the points in time when an
	 * external TLB range needs to be flushed. For more in-depth
	 * discussion on this see Documentation/vm/mmu_notifier.rst.
	 *
	 * Note that this function might be called with just a sub-range
	 * of what was passed to invalidate_range_start()/end(), if
	 * called between those functions.
	 */
	void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm,
				 unsigned long start, unsigned long end);
};
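
/*
 * Illustrative sketch only, not used by anything in this header: a driver
 * that mirrors CPU page tables into a device MMU could implement the ops
 * roughly as below, honoring the non-blocking rule described above. All
 * mydev_* names are hypothetical.
 *
 *	static int mydev_invalidate_range_start(struct mmu_notifier *mn,
 *				const struct mmu_notifier_range *range)
 *	{
 *		struct mydev *dev = container_of(mn, struct mydev, notifier);
 *
 *		if (!range->blockable) {
 *			if (!mutex_trylock(&dev->lock))
 *				return -EAGAIN;
 *		} else {
 *			mutex_lock(&dev->lock);
 *		}
 *		mydev_unmap_range(dev, range->start, range->end);
 *		mutex_unlock(&dev->lock);
 *		return 0;
 *	}
 *
 *	static const struct mmu_notifier_ops mydev_mmu_notifier_ops = {
 *		.release		= mydev_release,
 *		.invalidate_range_start	= mydev_invalidate_range_start,
 *		.invalidate_range_end	= mydev_invalidate_range_end,
 *	};
 */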

/*
 * The notifier chains are protected by mmap_sem and/or the reverse map
 * semaphores. Notifier chains are only changed when all reverse maps and
 * the mmap_sem locks are taken.
 *
 * Therefore notifier chains can only be traversed when either
 *
 * 1. mmap_sem is held.
 * 2. One of the reverse map locks is held (i_mmap_rwsem or anon_vma->rwsem).
 * 3. No other concurrent thread can access the list (release)
 */
struct mmu_notifier {
	struct hlist_node hlist;
	const struct mmu_notifier_ops *ops;
};

static inline int mm_has_notifiers(struct mm_struct *mm)
{
	return unlikely(mm->mmu_notifier_mm);
}

extern int mmu_notifier_register(struct mmu_notifier *mn,
				 struct mm_struct *mm);
extern int __mmu_notifier_register(struct mmu_notifier *mn,
				   struct mm_struct *mm);
extern void mmu_notifier_unregister(struct mmu_notifier *mn,
				    struct mm_struct *mm);
extern void mmu_notifier_unregister_no_release(struct mmu_notifier *mn,
					       struct mm_struct *mm);
extern void __mmu_notifier_mm_destroy(struct mm_struct *mm);
extern void __mmu_notifier_release(struct mm_struct *mm);
extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
					    unsigned long start,
					    unsigned long end);
extern int __mmu_notifier_clear_young(struct mm_struct *mm,
				      unsigned long start,
				      unsigned long end);
extern int __mmu_notifier_test_young(struct mm_struct *mm,
				     unsigned long address);
extern void __mmu_notifier_change_pte(struct mm_struct *mm,
				      unsigned long address, pte_t pte);
extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r);
extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r,
						bool only_end);
extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
					    unsigned long start, unsigned long end);
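
/*
 * Registration sketch, illustration only: a driver embeds a struct
 * mmu_notifier in its per-mm context and registers it against the mm it
 * wants to track. mmu_notifier_register() takes mmap_sem itself, so the
 * caller must not already hold it; __mmu_notifier_register() is for callers
 * that do. The ctx and mydev_* names are hypothetical.
 *
 *	ctx->notifier.ops = &mydev_mmu_notifier_ops;
 *	ret = mmu_notifier_register(&ctx->notifier, current->mm);
 *	if (ret)
 *		goto err_free_ctx;
 *	...
 *	mmu_notifier_unregister(&ctx->notifier, ctx->mm);
 */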

static inline bool
mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
{
	return range->blockable;
}

static inline void mmu_notifier_release(struct mm_struct *mm)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_release(mm);
}

static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
						 unsigned long start,
						 unsigned long end)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_clear_flush_young(mm, start, end);
	return 0;
}

static inline int mmu_notifier_clear_young(struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_clear_young(mm, start, end);
	return 0;
}

static inline int mmu_notifier_test_young(struct mm_struct *mm,
					  unsigned long address)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_test_young(mm, address);
	return 0;
}

static inline void mmu_notifier_change_pte(struct mm_struct *mm,
					   unsigned long address, pte_t pte)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_change_pte(mm, address, pte);
}

static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
	if (mm_has_notifiers(range->mm)) {
		range->blockable = true;
		__mmu_notifier_invalidate_range_start(range);
	}
}

static inline int
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
	if (mm_has_notifiers(range->mm)) {
		range->blockable = false;
		return __mmu_notifier_invalidate_range_start(range);
	}
	return 0;
}
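
/*
 * Non-blocking invalidation sketch, illustration only: a caller that must
 * not sleep (for example an oom-reaper style path) uses the _nonblock
 * variant and backs off if any notifier returns -EAGAIN.
 *
 *	mmu_notifier_range_init(&range, mm, vma->vm_start, vma->vm_end);
 *	if (mmu_notifier_invalidate_range_start_nonblock(&range)) {
 *		ret = false;
 *		continue;
 *	}
 *	unmap_page_range(&tlb, vma, range.start, range.end, NULL);
 *	mmu_notifier_invalidate_range_end(&range);
 */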

static inline void
mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
	if (mm_has_notifiers(range->mm))
		__mmu_notifier_invalidate_range_end(range, false);
}

static inline void
mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
{
	if (mm_has_notifiers(range->mm))
		__mmu_notifier_invalidate_range_end(range, true);
}

static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
						 unsigned long start, unsigned long end)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_invalidate_range(mm, start, end);
}

static inline void mmu_notifier_mm_init(struct mm_struct *mm)
{
	mm->mmu_notifier_mm = NULL;
}

static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_mm_destroy(mm);
}

static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
					   struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	range->mm = mm;
	range->start = start;
	range->end = end;
}
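
/*
 * Typical blocking usage by VM code, shown only as a sketch: the caller
 * describes the affected virtual range, brackets its page table updates
 * with the start/end pair, and the secondary MMUs drop or refresh their
 * mappings from the callbacks.
 *
 *	struct mmu_notifier_range range;
 *
 *	mmu_notifier_range_init(&range, mm, start, end);
 *	mmu_notifier_invalidate_range_start(&range);
 *	... clear or rewrite the primary MMU page table entries ...
 *	mmu_notifier_invalidate_range_end(&range);
 */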

#define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
({ \
	int __young; \
	struct vm_area_struct *___vma = __vma; \
	unsigned long ___address = __address; \
	__young = ptep_clear_flush_young(___vma, ___address, __ptep); \
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \
						  ___address, \
						  ___address + \
							PAGE_SIZE); \
	__young; \
})

#define pmdp_clear_flush_young_notify(__vma, __address, __pmdp) \
({ \
	int __young; \
	struct vm_area_struct *___vma = __vma; \
	unsigned long ___address = __address; \
	__young = pmdp_clear_flush_young(___vma, ___address, __pmdp); \
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \
						  ___address, \
						  ___address + \
							PMD_SIZE); \
	__young; \
})

#define ptep_clear_young_notify(__vma, __address, __ptep) \
({ \
	int __young; \
	struct vm_area_struct *___vma = __vma; \
	unsigned long ___address = __address; \
	__young = ptep_test_and_clear_young(___vma, ___address, __ptep); \
	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address, \
					    ___address + PAGE_SIZE); \
	__young; \
})

#define pmdp_clear_young_notify(__vma, __address, __pmdp) \
({ \
	int __young; \
	struct vm_area_struct *___vma = __vma; \
	unsigned long ___address = __address; \
	__young = pmdp_test_and_clear_young(___vma, ___address, __pmdp); \
	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address, \
					    ___address + PMD_SIZE); \
	__young; \
})

#define ptep_clear_flush_notify(__vma, __address, __ptep) \
({ \
	unsigned long ___addr = __address & PAGE_MASK; \
	struct mm_struct *___mm = (__vma)->vm_mm; \
	pte_t ___pte; \
	\
	___pte = ptep_clear_flush(__vma, __address, __ptep); \
	mmu_notifier_invalidate_range(___mm, ___addr, \
				      ___addr + PAGE_SIZE); \
	\
	___pte; \
})

#define pmdp_huge_clear_flush_notify(__vma, __haddr, __pmd) \
({ \
	unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \
	struct mm_struct *___mm = (__vma)->vm_mm; \
	pmd_t ___pmd; \
	\
	___pmd = pmdp_huge_clear_flush(__vma, __haddr, __pmd); \
	mmu_notifier_invalidate_range(___mm, ___haddr, \
				      ___haddr + HPAGE_PMD_SIZE); \
	\
	___pmd; \
})

#define pudp_huge_clear_flush_notify(__vma, __haddr, __pud) \
({ \
	unsigned long ___haddr = __haddr & HPAGE_PUD_MASK; \
	struct mm_struct *___mm = (__vma)->vm_mm; \
	pud_t ___pud; \
	\
	___pud = pudp_huge_clear_flush(__vma, __haddr, __pud); \
	mmu_notifier_invalidate_range(___mm, ___haddr, \
				      ___haddr + HPAGE_PUD_SIZE); \
	\
	___pud; \
})

/*
 * set_pte_at_notify() sets the pte _after_ running the notifier.
 * It is safe to update the secondary MMUs first, because the primary MMU
 * pte invalidate must have already happened with a ptep_clear_flush() before
 * set_pte_at_notify() has been invoked. Updating the secondary MMUs first is
 * required when we change both the protection of the mapping from read-only to
 * read-write and the pfn (like during copy on write page faults). Otherwise the
 * old page would remain mapped readonly in the secondary MMUs after the new
 * page is already writable by some CPU through the primary MMU.
 */
#define set_pte_at_notify(__mm, __address, __ptep, __pte) \
({ \
	struct mm_struct *___mm = __mm; \
	unsigned long ___address = __address; \
	pte_t ___pte = __pte; \
	\
	mmu_notifier_change_pte(___mm, ___address, ___pte); \
	set_pte_at(___mm, ___address, __ptep, ___pte); \
})
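
/*
 * Copy-on-write sketch, illustration only: the old pte is cleared and
 * flushed first, which also invalidates the secondary MMUs, and only then
 * is the new writable pte installed through set_pte_at_notify() so that
 * ->change_pte() listeners can update their mapping before any CPU can
 * write through the new pte.
 *
 *	ptep_clear_flush_notify(vma, address, ptep);
 *	entry = mk_pte(new_page, vma->vm_page_prot);
 *	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 *	set_pte_at_notify(mm, address, ptep, entry);
 */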

extern void mmu_notifier_call_srcu(struct rcu_head *rcu,
				   void (*func)(struct rcu_head *rcu));

#else /* CONFIG_MMU_NOTIFIER */

struct mmu_notifier_range {
	unsigned long start;
	unsigned long end;
};

static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range,
					    unsigned long start,
					    unsigned long end)
{
	range->start = start;
	range->end = end;
}

#define mmu_notifier_range_init(range, mm, start, end) \
	_mmu_notifier_range_init(range, start, end)

static inline bool
mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
{
	return true;
}

static inline int mm_has_notifiers(struct mm_struct *mm)
{
	return 0;
}

static inline void mmu_notifier_release(struct mm_struct *mm)
{
}

static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
						 unsigned long start,
						 unsigned long end)
{
	return 0;
}

static inline int mmu_notifier_test_young(struct mm_struct *mm,
					  unsigned long address)
{
	return 0;
}

static inline void mmu_notifier_change_pte(struct mm_struct *mm,
					   unsigned long address, pte_t pte)
{
}

static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
}

static inline int
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
	return 0;
}

static inline void
mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
}

static inline void
mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
{
}

static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
						 unsigned long start, unsigned long end)
{
}

static inline void mmu_notifier_mm_init(struct mm_struct *mm)
{
}

static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
{
}

#define ptep_clear_flush_young_notify ptep_clear_flush_young
#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
#define ptep_clear_young_notify ptep_test_and_clear_young
#define pmdp_clear_young_notify pmdp_test_and_clear_young
#define ptep_clear_flush_notify ptep_clear_flush
#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
#define pudp_huge_clear_flush_notify pudp_huge_clear_flush
#define set_pte_at_notify set_pte_at

#endif /* CONFIG_MMU_NOTIFIER */

#endif /* _LINUX_MMU_NOTIFIER_H */