/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMU_NOTIFIER_H
#define _LINUX_MMU_NOTIFIER_H

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <linux/srcu.h>

struct mmu_notifier;
struct mmu_notifier_ops;

#ifdef CONFIG_MMU_NOTIFIER

/*
 * The mmu_notifier_mm structure is allocated and installed in
 * mm->mmu_notifier_mm inside the mm_take_all_locks() protected
 * critical section and it's released only when mm_count reaches zero
 * in mmdrop().
 */
struct mmu_notifier_mm {
	/* all mmu notifiers registered in this mm are queued in this list */
	struct hlist_head list;
	/* to serialize the list modifications and hlist_unhashed */
	spinlock_t lock;
};

struct mmu_notifier_ops {
	/*
	 * Called either by mmu_notifier_unregister or when the mm is
	 * being destroyed by exit_mmap, always before all pages are
	 * freed. This can run concurrently with other mmu notifier
	 * methods (the ones invoked outside the mm context) and it
	 * should tear down all secondary mmu mappings and freeze the
	 * secondary mmu. If this method isn't implemented you have to
	 * be sure that nothing could possibly write to the pages
	 * through the secondary mmu by the time the last thread with
	 * tsk->mm == mm exits.
	 *
	 * As a side note: the pages freed after ->release returns could
	 * be immediately reallocated by the gart at an alias physical
	 * address with a different cache model, so if ->release isn't
	 * implemented because all _software_ driven memory accesses
	 * through the secondary mmu are terminated by the time the
	 * last thread of this mm quits, you also have to be sure that
	 * speculative _hardware_ operations can't allocate dirty
	 * cachelines in the cpu that could not be snooped and made
	 * coherent with the other read and write operations happening
	 * through the gart alias address, thereby leading to memory
	 * corruption.
	 */
	void (*release)(struct mmu_notifier *mn,
			struct mm_struct *mm);

	/*
	 * clear_flush_young is called when the VM test-and-clears the
	 * young/accessed bitflag in the pte. This way the VM will provide
	 * proper aging for accesses to the page through the secondary MMUs
	 * and not only for the ones through the Linux pte.
	 * Start-end is necessary in case the secondary MMU is mapping the page
	 * at a smaller granularity than the primary MMU.
	 */
	int (*clear_flush_young)(struct mmu_notifier *mn,
				 struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end);

	/*
	 * clear_young is a lightweight version of clear_flush_young. Like the
	 * latter, it is supposed to test-and-clear the young/accessed bitflag
	 * in the secondary pte, but it may omit flushing the secondary tlb.
	 */
	int (*clear_young)(struct mmu_notifier *mn,
			   struct mm_struct *mm,
			   unsigned long start,
			   unsigned long end);

	/*
	 * test_young is called to check the young/accessed bitflag in
	 * the secondary pte. This is used to know if the page is
	 * frequently used without actually clearing the flag or tearing
	 * down the secondary mapping on the page.
	 */
	int (*test_young)(struct mmu_notifier *mn,
			  struct mm_struct *mm,
			  unsigned long address);

	/*
	 * change_pte is called when a pte that maps a page is changed:
	 * for example, when ksm remaps the pte to point to a new shared page.
	 */
	void (*change_pte)(struct mmu_notifier *mn,
			   struct mm_struct *mm,
			   unsigned long address,
			   pte_t pte);

	/*
	 * invalidate_range_start() and invalidate_range_end() must be
	 * paired and are called only when the mmap_sem and/or the
	 * locks protecting the reverse maps are held. If the subsystem
	 * can't guarantee that no additional references are taken to
	 * the pages in the range, it has to implement the
	 * invalidate_range() notifier to remove any references taken
	 * after invalidate_range_start().
	 *
	 * Invalidation of multiple concurrent ranges may be
	 * optionally permitted by the driver. Either way the
	 * establishment of sptes is forbidden in the range passed to
	 * invalidate_range_start/end for the whole duration of the
	 * invalidate_range_start/end critical section.
	 *
	 * invalidate_range_start() is called when all pages in the
	 * range are still mapped and have at least a refcount of one.
	 *
	 * invalidate_range_end() is called when all pages in the
	 * range have been unmapped and the pages have been freed by
	 * the VM.
	 *
	 * The VM will remove the page table entries and potentially
	 * the page between invalidate_range_start() and
	 * invalidate_range_end(). If the page must not be freed
	 * because of pending I/O or other circumstances then the
	 * invalidate_range_start() callback (or the initial mapping
	 * by the driver) must make sure that the refcount is kept
	 * elevated.
	 *
	 * If the driver increases the refcount when the pages are
	 * initially mapped into an address space then either
	 * invalidate_range_start() or invalidate_range_end() may
	 * decrease the refcount. If the refcount is decreased on
	 * invalidate_range_start() then the VM can free pages as page
	 * table entries are removed. If the refcount is only
	 * dropped on invalidate_range_end() then the driver itself
	 * will drop the last refcount, but it must take care to flush
	 * any secondary tlb before doing the final free on the
	 * page. Pages will no longer be referenced by the linux
	 * address space but may still be referenced by sptes until
	 * the last refcount is dropped.
	 *
	 * If the blockable argument is set to false then the callback cannot
	 * sleep and has to return with -EAGAIN; 0 should be returned
	 * otherwise. Please note that if invalidate_range_start approves
	 * a non-blocking behavior then the same applies to
	 * invalidate_range_end.
	 */
	int (*invalidate_range_start)(struct mmu_notifier *mn,
				      struct mm_struct *mm,
				      unsigned long start, unsigned long end,
				      bool blockable);
	void (*invalidate_range_end)(struct mmu_notifier *mn,
				     struct mm_struct *mm,
				     unsigned long start, unsigned long end);

	/*
	 * invalidate_range() is either called between
	 * invalidate_range_start() and invalidate_range_end() when the
	 * VM has to free pages that were unmapped, but before the
	 * pages are actually freed, or outside of _start()/_end() when
	 * a (remote) TLB flush is necessary.
	 *
	 * If invalidate_range() is used to manage a non-CPU TLB with
	 * shared page-tables, it is not necessary to implement the
	 * invalidate_range_start()/end() notifiers, as
	 * invalidate_range() already catches the points in time when an
	 * external TLB range needs to be flushed. For more in depth
	 * discussion on this see Documentation/vm/mmu_notifier.rst
	 *
	 * Note that this function might be called with just a sub-range
	 * of what was passed to invalidate_range_start()/end(), if
	 * called between those functions.
	 */
	void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm,
				 unsigned long start, unsigned long end);
};
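
/*
 * Hedged, illustrative sketch only (excluded from the build below): one
 * plausible way a driver could fill in a subset of these callbacks.  The
 * "example_*" names are hypothetical and do not exist in the kernel; real
 * users (KVM, GRU, ...) implement just the callbacks they need.
 */
#if 0
static void example_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	/* tear down all secondary mmu mappings; the mm is going away */
}

static int example_invalidate_range_start(struct mmu_notifier *mn,
					  struct mm_struct *mm,
					  unsigned long start,
					  unsigned long end,
					  bool blockable)
{
	/* if !blockable, return -EAGAIN instead of sleeping */
	/* stop the secondary MMU from using sptes in [start, end) */
	return 0;
}

static void example_invalidate_range_end(struct mmu_notifier *mn,
					 struct mm_struct *mm,
					 unsigned long start,
					 unsigned long end)
{
	/* the range may be re-populated by the secondary MMU again */
}

static const struct mmu_notifier_ops example_mmu_notifier_ops = {
	.release		= example_release,
	.invalidate_range_start	= example_invalidate_range_start,
	.invalidate_range_end	= example_invalidate_range_end,
};
#endif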

/*
 * The notifier chains are protected by mmap_sem and/or the reverse map
 * semaphores. Notifier chains are only changed when all reverse maps and
 * the mmap_sem locks are taken.
 *
 * Therefore notifier chains can only be traversed when either
 *
 * 1. mmap_sem is held.
 * 2. One of the reverse map locks is held (i_mmap_rwsem or anon_vma->rwsem).
 * 3. No other concurrent thread can access the list (release)
 */
struct mmu_notifier {
	struct hlist_node hlist;
	const struct mmu_notifier_ops *ops;
};

static inline int mm_has_notifiers(struct mm_struct *mm)
{
	return unlikely(mm->mmu_notifier_mm);
}

extern int mmu_notifier_register(struct mmu_notifier *mn,
				 struct mm_struct *mm);
extern int __mmu_notifier_register(struct mmu_notifier *mn,
				   struct mm_struct *mm);
extern void mmu_notifier_unregister(struct mmu_notifier *mn,
				    struct mm_struct *mm);
extern void mmu_notifier_unregister_no_release(struct mmu_notifier *mn,
					       struct mm_struct *mm);
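
/*
 * Hedged usage sketch (not built): a driver would typically embed the
 * struct mmu_notifier in its own per-mm context, recover it in the
 * callbacks with container_of(), and register it against a live mm.
 * "example_ctx" and "example_mmu_notifier_ops" are made-up names used
 * only for illustration.
 */
#if 0
struct example_ctx {
	struct mmu_notifier	mn;
	/* driver-private secondary MMU state would live here */
};

static int example_attach(struct example_ctx *ctx, struct mm_struct *mm)
{
	ctx->mn.ops = &example_mmu_notifier_ops;
	/* takes mmap_sem for write internally and pins mm until ->release */
	return mmu_notifier_register(&ctx->mn, mm);
}
#endif
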
extern void __mmu_notifier_mm_destroy(struct mm_struct *mm);
extern void __mmu_notifier_release(struct mm_struct *mm);
extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
					  unsigned long start,
					  unsigned long end);
extern int __mmu_notifier_clear_young(struct mm_struct *mm,
				      unsigned long start,
				      unsigned long end);
extern int __mmu_notifier_test_young(struct mm_struct *mm,
				     unsigned long address);
extern void __mmu_notifier_change_pte(struct mm_struct *mm,
				      unsigned long address, pte_t pte);
extern int __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
				  unsigned long start, unsigned long end,
				  bool blockable);
extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
				  unsigned long start, unsigned long end,
				  bool only_end);
extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
				  unsigned long start, unsigned long end);

static inline void mmu_notifier_release(struct mm_struct *mm)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_release(mm);
}

static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
					  unsigned long start,
					  unsigned long end)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_clear_flush_young(mm, start, end);
	return 0;
}

static inline int mmu_notifier_clear_young(struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_clear_young(mm, start, end);
	return 0;
}

static inline int mmu_notifier_test_young(struct mm_struct *mm,
					  unsigned long address)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_test_young(mm, address);
	return 0;
}

static inline void mmu_notifier_change_pte(struct mm_struct *mm,
					   unsigned long address, pte_t pte)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_change_pte(mm, address, pte);
}

static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_invalidate_range_start(mm, start, end, true);
}

static inline int mmu_notifier_invalidate_range_start_nonblock(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_invalidate_range_start(mm, start, end, false);
	return 0;
}

static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_invalidate_range_end(mm, start, end, false);
}

static inline void mmu_notifier_invalidate_range_only_end(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_invalidate_range_end(mm, start, end, true);
}

static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_invalidate_range(mm, start, end);
}
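
/*
 * Hedged sketch of the calling convention the core VM follows (simplified;
 * the real callers live in mm/ and the function below does not exist): the
 * _start/_end pair brackets the teardown of primary MMU page table entries,
 * and no new sptes may be established for the range until _end returns.
 */
#if 0
static void example_unmap_window(struct mm_struct *mm,
				 unsigned long start, unsigned long end)
{
	mmu_notifier_invalidate_range_start(mm, start, end);
	/* ... tear down the primary MMU page table entries for [start, end) ... */
	mmu_notifier_invalidate_range_end(mm, start, end);
}
#endif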

static inline void mmu_notifier_mm_init(struct mm_struct *mm)
{
	mm->mmu_notifier_mm = NULL;
}

static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_mm_destroy(mm);
}

#define ptep_clear_flush_young_notify(__vma, __address, __ptep)	\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = ptep_clear_flush_young(___vma, ___address, __ptep);	\
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
						  ___address,		\
						  ___address +		\
							PAGE_SIZE);	\
	__young;							\
})

#define pmdp_clear_flush_young_notify(__vma, __address, __pmdp)	\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = pmdp_clear_flush_young(___vma, ___address, __pmdp);	\
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
						  ___address,		\
						  ___address +		\
							PMD_SIZE);	\
	__young;							\
})

#define ptep_clear_young_notify(__vma, __address, __ptep)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = ptep_test_and_clear_young(___vma, ___address, __ptep);\
	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
					    ___address + PAGE_SIZE);	\
	__young;							\
})

#define pmdp_clear_young_notify(__vma, __address, __pmdp)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = pmdp_test_and_clear_young(___vma, ___address, __pmdp);\
	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
					    ___address + PMD_SIZE);	\
	__young;							\
})
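
/*
 * Hedged usage sketch (not built): reclaim-style aging code uses the
 * *_notify variants so that the referenced state tracked by secondary MMUs
 * is folded into the decision; the helper below is purely illustrative and
 * does not exist in the kernel.
 */
#if 0
static bool example_page_was_referenced(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep)
{
	/* clears the accessed bit in the pte and in any secondary MMU mapping */
	return ptep_clear_flush_young_notify(vma, address, ptep) != 0;
}
#endif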

#define ptep_clear_flush_notify(__vma, __address, __ptep)		\
({									\
	unsigned long ___addr = __address & PAGE_MASK;			\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pte_t ___pte;							\
									\
	___pte = ptep_clear_flush(__vma, __address, __ptep);		\
	mmu_notifier_invalidate_range(___mm, ___addr,			\
				      ___addr + PAGE_SIZE);		\
									\
	___pte;								\
})

#define pmdp_huge_clear_flush_notify(__vma, __haddr, __pmd)		\
({									\
	unsigned long ___haddr = __haddr & HPAGE_PMD_MASK;		\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pmd_t ___pmd;							\
									\
	___pmd = pmdp_huge_clear_flush(__vma, __haddr, __pmd);		\
	mmu_notifier_invalidate_range(___mm, ___haddr,			\
				      ___haddr + HPAGE_PMD_SIZE);	\
									\
	___pmd;								\
})

#define pudp_huge_clear_flush_notify(__vma, __haddr, __pud)		\
({									\
	unsigned long ___haddr = __haddr & HPAGE_PUD_MASK;		\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pud_t ___pud;							\
									\
	___pud = pudp_huge_clear_flush(__vma, __haddr, __pud);		\
	mmu_notifier_invalidate_range(___mm, ___haddr,			\
				      ___haddr + HPAGE_PUD_SIZE);	\
									\
	___pud;								\
})

/*
 * set_pte_at_notify() sets the pte _after_ running the notifier.
 * It is safe to start by updating the secondary MMUs, because the primary MMU
 * pte invalidate must have already happened with a ptep_clear_flush() before
 * set_pte_at_notify() has been invoked. Updating the secondary MMUs first is
 * required when we change both the protection of the mapping from read-only to
 * read-write and the pfn (like during copy on write page faults). Otherwise the
 * old page would remain mapped read-only in the secondary MMUs after the new
 * page is already writable by some CPU through the primary MMU.
 */
#define set_pte_at_notify(__mm, __address, __ptep, __pte)		\
({									\
	struct mm_struct *___mm = __mm;					\
	unsigned long ___address = __address;				\
	pte_t ___pte = __pte;						\
									\
	mmu_notifier_change_pte(___mm, ___address, ___pte);		\
	set_pte_at(___mm, ___address, __ptep, ___pte);			\
})
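
/*
 * Hedged sketch of the ordering set_pte_at_notify() relies on, modelled on a
 * copy-on-write style replacement (the function below is illustrative only):
 * the old mapping is flushed first, so the secondary MMU can be redirected to
 * the new page before the primary MMU makes it visible and writable.
 */
#if 0
static void example_replace_page(struct vm_area_struct *vma, unsigned long addr,
				 pte_t *ptep, pte_t newpte)
{
	struct mm_struct *mm = vma->vm_mm;

	ptep_clear_flush(vma, addr, ptep);		/* invalidate the old pte */
	set_pte_at_notify(mm, addr, ptep, newpte);	/* notifier runs, then pte is set */
}
#endif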

extern void mmu_notifier_call_srcu(struct rcu_head *rcu,
				   void (*func)(struct rcu_head *rcu));

#else /* CONFIG_MMU_NOTIFIER */

static inline int mm_has_notifiers(struct mm_struct *mm)
{
	return 0;
}

static inline void mmu_notifier_release(struct mm_struct *mm)
{
}

static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
					  unsigned long start,
					  unsigned long end)
{
	return 0;
}

static inline int mmu_notifier_test_young(struct mm_struct *mm,
					  unsigned long address)
{
	return 0;
}

static inline void mmu_notifier_change_pte(struct mm_struct *mm,
					   unsigned long address, pte_t pte)
{
}

static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
}

static inline int mmu_notifier_invalidate_range_start_nonblock(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	return 0;
}

static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
}

static inline void mmu_notifier_invalidate_range_only_end(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
}

static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
}

static inline void mmu_notifier_mm_init(struct mm_struct *mm)
{
}

static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
{
}

#define ptep_clear_flush_young_notify ptep_clear_flush_young
#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
#define ptep_clear_young_notify ptep_test_and_clear_young
#define pmdp_clear_young_notify pmdp_test_and_clear_young
#define ptep_clear_flush_notify ptep_clear_flush
#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
#define pudp_huge_clear_flush_notify pudp_huge_clear_flush
#define set_pte_at_notify set_pte_at

#endif /* CONFIG_MMU_NOTIFIER */

#endif /* _LINUX_MMU_NOTIFIER_H */