/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMU_NOTIFIER_H
#define _LINUX_MMU_NOTIFIER_H

#include <linux/types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <linux/srcu.h>

struct mmu_notifier;
struct mmu_notifier_ops;

/* mmu_notifier_ops flags */
#define MMU_INVALIDATE_DOES_NOT_BLOCK	(0x01)

#ifdef CONFIG_MMU_NOTIFIER

/*
 * The mmu notifier_mm structure is allocated and installed in
 * mm->mmu_notifier_mm inside the mm_take_all_locks() protected
 * critical section and it's released only when mm_count reaches zero
 * in mmdrop().
 */
struct mmu_notifier_mm {
	/* all mmu notifiers registered in this mm are queued in this list */
	struct hlist_head list;
	/* to serialize the list modifications and hlist_unhashed */
	spinlock_t lock;
};

struct mmu_notifier_ops {
	/*
	 * Flags to specify the behavior of the callbacks for this MMU
	 * notifier. Used to determine in which context an operation may
	 * be called.
	 *
	 * MMU_INVALIDATE_DOES_NOT_BLOCK: the invalidate_range_* callbacks
	 * do not block.
	 */
	int flags;

	/*
	 * Called either by mmu_notifier_unregister or when the mm is
	 * being destroyed by exit_mmap, always before all pages are
	 * freed. This can run concurrently with other mmu notifier
	 * methods (the ones invoked outside the mm context) and it
	 * should tear down all secondary mmu mappings and freeze the
	 * secondary mmu. If this method isn't implemented, you have to
	 * be sure that nothing could possibly write to the pages
	 * through the secondary mmu by the time the last thread with
	 * tsk->mm == mm exits.
	 *
	 * As a side note: the pages freed after ->release returns could
	 * be immediately reallocated by the gart at an alias physical
	 * address with a different cache model. So even if ->release
	 * isn't implemented because all _software_ driven memory
	 * accesses through the secondary mmu are terminated by the time
	 * the last thread of this mm quits, you also have to be sure
	 * that speculative _hardware_ operations cannot allocate dirty
	 * cachelines in the cpu that could not be snooped and made
	 * coherent with the other read and write operations happening
	 * through the gart alias address, which would lead to memory
	 * corruption.
	 */
	void (*release)(struct mmu_notifier *mn,
			struct mm_struct *mm);

	/*
	 * clear_flush_young is called after the VM test-and-clears the
	 * young/accessed bitflag in the pte. This way the VM provides
	 * proper aging for accesses to the page through the secondary
	 * MMUs and not only for the ones through the Linux pte.
	 * The start/end range is necessary in case the secondary MMU
	 * maps the page at a smaller granularity than the primary MMU.
	 */
	int (*clear_flush_young)(struct mmu_notifier *mn,
				 struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end);

	/*
	 * clear_young is a lightweight version of clear_flush_young. Like the
	 * latter, it is supposed to test-and-clear the young/accessed bitflag
	 * in the secondary pte, but it may omit flushing the secondary tlb.
	 */
	int (*clear_young)(struct mmu_notifier *mn,
			   struct mm_struct *mm,
			   unsigned long start,
			   unsigned long end);

	/*
	 * test_young is called to check the young/accessed bitflag in
	 * the secondary pte. This is used to know if the page is
	 * frequently used without actually clearing the flag or tearing
	 * down the secondary mapping on the page.
	 */
	int (*test_young)(struct mmu_notifier *mn,
			  struct mm_struct *mm,
			  unsigned long address);

	/*
	 * change_pte is called when the pte mapping a page is changed:
	 * for example, when ksm remaps the pte to point to a new shared
	 * page.
	 */
	void (*change_pte)(struct mmu_notifier *mn,
			   struct mm_struct *mm,
			   unsigned long address,
			   pte_t pte);

	/*
	 * invalidate_range_start() and invalidate_range_end() must be
	 * paired and are called only when the mmap_sem and/or the
	 * locks protecting the reverse maps are held. If the subsystem
	 * can't guarantee that no additional references are taken to
	 * the pages in the range, it has to implement the
	 * invalidate_range() notifier to remove any references taken
	 * after invalidate_range_start().
	 *
	 * Invalidation of multiple concurrent ranges may optionally be
	 * permitted by the driver. Either way the establishment of
	 * sptes is forbidden in the range passed to
	 * invalidate_range_start/end for the whole duration of the
	 * invalidate_range_start/end critical section.
	 *
	 * invalidate_range_start() is called when all pages in the
	 * range are still mapped and have at least a refcount of one.
	 *
	 * invalidate_range_end() is called when all pages in the
	 * range have been unmapped and the pages have been freed by
	 * the VM.
	 *
	 * The VM will remove the page table entries and potentially
	 * the page between invalidate_range_start() and
	 * invalidate_range_end(). If the page must not be freed
	 * because of pending I/O or other circumstances then the
	 * invalidate_range_start() callback (or the initial mapping
	 * by the driver) must make sure that the refcount is kept
	 * elevated.
	 *
	 * If the driver increases the refcount when the pages are
	 * initially mapped into an address space then either
	 * invalidate_range_start() or invalidate_range_end() may
	 * decrease the refcount. If the refcount is decreased on
	 * invalidate_range_start() then the VM can free pages as page
	 * table entries are removed. If the refcount is only
	 * dropped on invalidate_range_end() then the driver itself
	 * will drop the last refcount, but it must take care to flush
	 * any secondary tlb before doing the final free on the
	 * page. Pages will no longer be referenced by the linux
	 * address space but may still be referenced by sptes until
	 * the last refcount is dropped.
	 *
	 * If the blockable argument is set to false then the callback
	 * cannot sleep and has to return -EAGAIN; 0 should be returned
	 * otherwise. Please note that if invalidate_range_start approves
	 * a non-blocking behavior then the same applies to
	 * invalidate_range_end.
	 */
	int (*invalidate_range_start)(struct mmu_notifier *mn,
				      struct mm_struct *mm,
				      unsigned long start, unsigned long end,
				      bool blockable);
	void (*invalidate_range_end)(struct mmu_notifier *mn,
				     struct mm_struct *mm,
				     unsigned long start, unsigned long end);

	/*
	 * invalidate_range() is either called between
	 * invalidate_range_start() and invalidate_range_end() when the
	 * VM has to free pages that were unmapped, but before the
	 * pages are actually freed, or outside of _start()/_end() when
	 * a (remote) TLB flush is necessary.
	 *
	 * If invalidate_range() is used to manage a non-CPU TLB with
	 * shared page-tables, it is not necessary to implement the
	 * invalidate_range_start()/end() notifiers, as
	 * invalidate_range() already catches the points in time when an
	 * external TLB range needs to be flushed. For a more in-depth
	 * discussion see Documentation/vm/mmu_notifier.rst.
	 *
	 * Note that this function might be called with just a sub-range
	 * of what was passed to invalidate_range_start()/end(), if
	 * called between those functions.
	 *
	 * If this callback cannot block, and invalidate_range_{start,end}
	 * cannot block, mmu_notifier_ops.flags should have
	 * MMU_INVALIDATE_DOES_NOT_BLOCK set.
	 */
	void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm,
				 unsigned long start, unsigned long end);
};
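
/*
 * Illustrative sketch only, not part of this API: a hypothetical driver that
 * mirrors CPU page tables into a device TLB might fill in an ops table
 * roughly as below; every my_dev_* name is made up for the example.
 *
 *	static int my_dev_invalidate_range_start(struct mmu_notifier *mn,
 *						 struct mm_struct *mm,
 *						 unsigned long start,
 *						 unsigned long end,
 *						 bool blockable)
 *	{
 *		if (!blockable && my_dev_would_sleep())
 *			return -EAGAIN;
 *		...stop device access to [start, end) and flush its TLB...
 *		return 0;
 *	}
 *
 *	static const struct mmu_notifier_ops my_dev_mmu_notifier_ops = {
 *		.release		= my_dev_release,
 *		.invalidate_range_start	= my_dev_invalidate_range_start,
 *		.invalidate_range_end	= my_dev_invalidate_range_end,
 *	};
 */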

/*
 * The notifier chains are protected by mmap_sem and/or the reverse map
 * semaphores. Notifier chains are only changed when all reverse maps and
 * the mmap_sem locks are taken.
 *
 * Therefore notifier chains can only be traversed when either
 *
 * 1. mmap_sem is held.
 * 2. One of the reverse map locks is held (i_mmap_rwsem or anon_vma->rwsem).
 * 3. No other concurrent thread can access the list (release)
 */
struct mmu_notifier {
	struct hlist_node hlist;
	const struct mmu_notifier_ops *ops;
};
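
/*
 * A mmu_notifier is normally embedded in a larger, subscriber-private
 * structure so that the callbacks can recover their context with
 * container_of(). Minimal sketch (struct my_dev_ctx is hypothetical):
 *
 *	struct my_dev_ctx {
 *		struct mmu_notifier mn;
 *		...device state...
 *	};
 *
 *	static void my_dev_release(struct mmu_notifier *mn,
 *				   struct mm_struct *mm)
 *	{
 *		struct my_dev_ctx *ctx =
 *			container_of(mn, struct my_dev_ctx, mn);
 *		...tear down all of ctx's secondary mappings...
 *	}
 */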

static inline int mm_has_notifiers(struct mm_struct *mm)
{
	return unlikely(mm->mmu_notifier_mm);
}

extern int mmu_notifier_register(struct mmu_notifier *mn,
				 struct mm_struct *mm);
extern int __mmu_notifier_register(struct mmu_notifier *mn,
				   struct mm_struct *mm);
extern void mmu_notifier_unregister(struct mmu_notifier *mn,
				    struct mm_struct *mm);
extern void mmu_notifier_unregister_no_release(struct mmu_notifier *mn,
					       struct mm_struct *mm);
extern void __mmu_notifier_mm_destroy(struct mm_struct *mm);
extern void __mmu_notifier_release(struct mm_struct *mm);
extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
					    unsigned long start,
					    unsigned long end);
extern int __mmu_notifier_clear_young(struct mm_struct *mm,
				      unsigned long start,
				      unsigned long end);
extern int __mmu_notifier_test_young(struct mm_struct *mm,
				     unsigned long address);
extern void __mmu_notifier_change_pte(struct mm_struct *mm,
				      unsigned long address, pte_t pte);
extern int __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
						 unsigned long start,
						 unsigned long end,
						 bool blockable);
extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
						unsigned long start,
						unsigned long end,
						bool only_end);
extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
					    unsigned long start,
					    unsigned long end);
extern bool mm_has_blockable_invalidate_notifiers(struct mm_struct *mm);
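
/*
 * Typical registration pattern, shown only as a sketch (error handling
 * elided, my_dev_* names hypothetical): point ->ops at the callback table
 * before registering, and unregister before freeing the structure.
 *
 *	ctx->mn.ops = &my_dev_mmu_notifier_ops;
 *	ret = mmu_notifier_register(&ctx->mn, current->mm);
 *	if (ret)
 *		...bail out...
 *	...
 *	mmu_notifier_unregister(&ctx->mn, mm);
 */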

static inline void mmu_notifier_release(struct mm_struct *mm)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_release(mm);
}

static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
						 unsigned long start,
						 unsigned long end)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_clear_flush_young(mm, start, end);
	return 0;
}

static inline int mmu_notifier_clear_young(struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_clear_young(mm, start, end);
	return 0;
}

static inline int mmu_notifier_test_young(struct mm_struct *mm,
					  unsigned long address)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_test_young(mm, address);
	return 0;
}

static inline void mmu_notifier_change_pte(struct mm_struct *mm,
					   unsigned long address, pte_t pte)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_change_pte(mm, address, pte);
}

static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
						       unsigned long start,
						       unsigned long end)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_invalidate_range_start(mm, start, end, true);
}

static inline int mmu_notifier_invalidate_range_start_nonblock(struct mm_struct *mm,
							       unsigned long start,
							       unsigned long end)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_invalidate_range_start(mm, start, end, false);
	return 0;
}
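
/*
 * The _nonblock variant above is meant for callers that must not sleep
 * (e.g. an OOM-reaping style path); such callers have to be prepared to
 * back off when any notifier refuses the non-blocking invalidation.
 * Sketch:
 *
 *	if (mmu_notifier_invalidate_range_start_nonblock(mm, start, end))
 *		return false;	(give up and retry the operation later)
 */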

static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
						     unsigned long start,
						     unsigned long end)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_invalidate_range_end(mm, start, end, false);
}

static inline void mmu_notifier_invalidate_range_only_end(struct mm_struct *mm,
							   unsigned long start,
							   unsigned long end)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_invalidate_range_end(mm, start, end, true);
}
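
/*
 * From the core VM's point of view the calls above bracket the actual page
 * table manipulation, roughly (simplified sketch):
 *
 *	mmu_notifier_invalidate_range_start(mm, start, end);
 *	...clear and flush the primary MMU page table entries...
 *	mmu_notifier_invalidate_range_end(mm, start, end);
 *
 * so a subscriber that drops its secondary mappings in
 * ->invalidate_range_start() and refuses to establish new ones until
 * ->invalidate_range_end() runs can never use a stale translation.
 */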

static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
						 unsigned long start,
						 unsigned long end)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_invalidate_range(mm, start, end);
}

static inline void mmu_notifier_mm_init(struct mm_struct *mm)
{
	mm->mmu_notifier_mm = NULL;
}

static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_mm_destroy(mm);
}

#define ptep_clear_flush_young_notify(__vma, __address, __ptep)	\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = ptep_clear_flush_young(___vma, ___address, __ptep);	\
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
						  ___address,		\
						  ___address +		\
							PAGE_SIZE);	\
	__young;							\
})
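
/*
 * Reclaim/aging paths use the wrapper above instead of the plain primary MMU
 * helper so that references through secondary MMUs are accounted as well.
 * Sketch:
 *
 *	if (ptep_clear_flush_young_notify(vma, address, pte))
 *		referenced++;
 */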

#define pmdp_clear_flush_young_notify(__vma, __address, __pmdp)	\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = pmdp_clear_flush_young(___vma, ___address, __pmdp);	\
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
						  ___address,		\
						  ___address +		\
							PMD_SIZE);	\
	__young;							\
})

#define ptep_clear_young_notify(__vma, __address, __ptep)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = ptep_test_and_clear_young(___vma, ___address, __ptep);\
	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
					    ___address + PAGE_SIZE);	\
	__young;							\
})

#define pmdp_clear_young_notify(__vma, __address, __pmdp)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = pmdp_test_and_clear_young(___vma, ___address, __pmdp);\
	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
					    ___address + PMD_SIZE);	\
	__young;							\
})

#define ptep_clear_flush_notify(__vma, __address, __ptep)		\
({									\
	unsigned long ___addr = __address & PAGE_MASK;			\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pte_t ___pte;							\
									\
	___pte = ptep_clear_flush(__vma, __address, __ptep);		\
	mmu_notifier_invalidate_range(___mm, ___addr,			\
				      ___addr + PAGE_SIZE);		\
									\
	___pte;								\
})

#define pmdp_huge_clear_flush_notify(__vma, __haddr, __pmd)		\
({									\
	unsigned long ___haddr = __haddr & HPAGE_PMD_MASK;		\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pmd_t ___pmd;							\
									\
	___pmd = pmdp_huge_clear_flush(__vma, __haddr, __pmd);		\
	mmu_notifier_invalidate_range(___mm, ___haddr,			\
				      ___haddr + HPAGE_PMD_SIZE);	\
									\
	___pmd;								\
})

#define pudp_huge_clear_flush_notify(__vma, __haddr, __pud)		\
({									\
	unsigned long ___haddr = __haddr & HPAGE_PUD_MASK;		\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pud_t ___pud;							\
									\
	___pud = pudp_huge_clear_flush(__vma, __haddr, __pud);		\
	mmu_notifier_invalidate_range(___mm, ___haddr,			\
				      ___haddr + HPAGE_PUD_SIZE);	\
									\
	___pud;								\
})

/*
 * set_pte_at_notify() sets the pte _after_ running the notifier.
 * It is safe to update the secondary MMUs first because the primary MMU
 * pte invalidate must have already happened with a ptep_clear_flush()
 * before set_pte_at_notify() is invoked. Updating the secondary MMUs
 * first is required when we change both the protection of the mapping
 * from read-only to read-write and the pfn (like during copy on write
 * page faults). Otherwise the old page would remain mapped read-only in
 * the secondary MMUs after the new page is already writable by some CPU
 * through the primary MMU.
 */
#define set_pte_at_notify(__mm, __address, __ptep, __pte)		\
({									\
	struct mm_struct *___mm = __mm;					\
	unsigned long ___address = __address;				\
	pte_t ___pte = __pte;						\
									\
	mmu_notifier_change_pte(___mm, ___address, ___pte);		\
	set_pte_at(___mm, ___address, __ptep, ___pte);			\
})
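
/*
 * Simplified sketch of the kind of write-protect fault path this is meant
 * for (locking, accounting and error handling elided): the old pte is
 * invalidated everywhere first, then the secondary MMUs learn about the new
 * pte before the primary MMU does.
 *
 *	entry = mk_pte(new_page, vma->vm_page_prot);
 *	entry = pte_mkwrite(pte_mkdirty(entry));
 *	ptep_clear_flush_notify(vma, address, ptep);
 *	set_pte_at_notify(mm, address, ptep, entry);
 */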

extern void mmu_notifier_call_srcu(struct rcu_head *rcu,
				   void (*func)(struct rcu_head *rcu));
extern void mmu_notifier_synchronize(void);

#else /* CONFIG_MMU_NOTIFIER */

static inline int mm_has_notifiers(struct mm_struct *mm)
{
	return 0;
}

static inline void mmu_notifier_release(struct mm_struct *mm)
{
}

static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
						 unsigned long start,
						 unsigned long end)
{
	return 0;
}

static inline int mmu_notifier_test_young(struct mm_struct *mm,
					  unsigned long address)
{
	return 0;
}

static inline void mmu_notifier_change_pte(struct mm_struct *mm,
					   unsigned long address, pte_t pte)
{
}

static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
						       unsigned long start,
						       unsigned long end)
{
}

static inline int mmu_notifier_invalidate_range_start_nonblock(struct mm_struct *mm,
							       unsigned long start,
							       unsigned long end)
{
	return 0;
}

static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
						     unsigned long start,
						     unsigned long end)
{
}

static inline void mmu_notifier_invalidate_range_only_end(struct mm_struct *mm,
							   unsigned long start,
							   unsigned long end)
{
}

static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
						 unsigned long start,
						 unsigned long end)
{
}

static inline bool mm_has_blockable_invalidate_notifiers(struct mm_struct *mm)
{
	return false;
}

static inline void mmu_notifier_mm_init(struct mm_struct *mm)
{
}

static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
{
}

#define ptep_clear_flush_young_notify ptep_clear_flush_young
#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
#define ptep_clear_young_notify ptep_test_and_clear_young
#define pmdp_clear_young_notify pmdp_test_and_clear_young
#define ptep_clear_flush_notify ptep_clear_flush
#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
#define pudp_huge_clear_flush_notify pudp_huge_clear_flush
#define set_pte_at_notify set_pte_at

#endif /* CONFIG_MMU_NOTIFIER */

#endif /* _LINUX_MMU_NOTIFIER_H */