/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAPOPS_H
#define _LINUX_SWAPOPS_H

#include <linux/radix-tree.h>
#include <linux/bug.h>
#include <linux/mm_types.h>

/*
 * swapcache pages are stored in the swapper_space radix tree. We want to
 * get good packing density in that tree, so the index should be dense in
 * the low-order bits.
 *
 * We arrange the `type' and `offset' fields so that `type' is at the seven
 * high-order bits of the swp_entry_t and `offset' is right-aligned in the
 * remaining bits. Although `type' itself needs only five bits, we allow for
 * shmem/tmpfs to shift it all up a further two bits: see swp_to_radix_entry().
 *
 * swp_entry_t's are *never* stored anywhere in their arch-dependent format.
 */
#define SWP_TYPE_SHIFT	(BITS_PER_XA_VALUE - MAX_SWAPFILES_SHIFT)
#define SWP_OFFSET_MASK	((1UL << SWP_TYPE_SHIFT) - 1)
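
/*
 * Worked example of the layout above (a sketch; the numbers assume a
 * 64-bit build, where BITS_PER_XA_VALUE is 63 and MAX_SWAPFILES_SHIFT
 * is 5, both defined elsewhere):
 *
 *	SWP_TYPE_SHIFT	== 63 - 5 == 58
 *	SWP_OFFSET_MASK	== (1UL << 58) - 1	(offset in bits 0..57)
 *
 * so the type occupies bits 58..62, and the whole value stays within
 * 63 bits so that swp_to_radix_entry() can still tag it as an XArray
 * value.
 */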

/*
 * Store a type+offset into a swp_entry_t in an arch-independent format
 */
static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset)
{
	swp_entry_t ret;

	ret.val = (type << SWP_TYPE_SHIFT) | (offset & SWP_OFFSET_MASK);
	return ret;
}

/*
 * Extract the `type' field from a swp_entry_t. The swp_entry_t is in
 * arch-independent format
 */
static inline unsigned swp_type(swp_entry_t entry)
{
	return (entry.val >> SWP_TYPE_SHIFT);
}

/*
 * Extract the `offset' field from a swp_entry_t. The swp_entry_t is in
 * arch-independent format
 */
static inline pgoff_t swp_offset(swp_entry_t entry)
{
	return entry.val & SWP_OFFSET_MASK;
}
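
/*
 * Illustrative round trip through the three helpers above (hypothetical
 * values, purely for illustration):
 *
 *	swp_entry_t e = swp_entry(2, 0x1234);
 *
 *	swp_type(e);	// == 2
 *	swp_offset(e);	// == 0x1234
 *
 * Note that swp_entry() silently masks off any offset bits at or above
 * SWP_TYPE_SHIFT.
 */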

#ifdef CONFIG_MMU
/* check whether a pte points to a swap entry */
static inline int is_swap_pte(pte_t pte)
{
	return !pte_none(pte) && !pte_present(pte);
}
#endif

/*
 * Convert the arch-dependent pte representation of a swp_entry_t into an
 * arch-independent swp_entry_t.
 */
static inline swp_entry_t pte_to_swp_entry(pte_t pte)
{
	swp_entry_t arch_entry;

	if (pte_swp_soft_dirty(pte))
		pte = pte_swp_clear_soft_dirty(pte);
	arch_entry = __pte_to_swp_entry(pte);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

/*
 * Convert the arch-independent representation of a swp_entry_t into the
 * arch-dependent pte representation.
 */
static inline pte_t swp_entry_to_pte(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pte(arch_entry);
}
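
/*
 * Sketch of the usual pte round trip (the __pte_to_swp_entry() family
 * is arch-defined, so the pte bit layout varies by architecture):
 *
 *	if (is_swap_pte(pte)) {
 *		swp_entry_t entry = pte_to_swp_entry(pte);
 *		...
 *		pte = swp_entry_to_pte(entry);
 *	}
 *
 * The soft-dirty bit is stripped on the way in and is not restored by
 * swp_entry_to_pte(); callers that need it re-apply it with
 * pte_swp_mksoft_dirty().
 */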

static inline swp_entry_t radix_to_swp_entry(void *arg)
{
	swp_entry_t entry;

	entry.val = xa_to_value(arg);
	return entry;
}

static inline void *swp_to_radix_entry(swp_entry_t entry)
{
	return xa_mk_value(entry.val);
}
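
/*
 * What the two helpers above produce, assuming the generic
 * xa_mk_value()/xa_to_value() definitions (payload shifted up one bit,
 * bit 0 used as the value tag):
 *
 *	void *slot = swp_to_radix_entry(e);	// == (void *)((e.val << 1) | 1)
 *	swp_entry_t e2 = radix_to_swp_entry(slot);	// e2.val == e.val
 *
 * This tag shift is the headroom the layout comment at the top of the
 * file reserves above SWP_TYPE_SHIFT.
 */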

#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
{
	return swp_entry(write ? SWP_DEVICE_WRITE : SWP_DEVICE_READ,
			 page_to_pfn(page));
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
	int type = swp_type(entry);
	return type == SWP_DEVICE_READ || type == SWP_DEVICE_WRITE;
}

static inline void make_device_private_entry_read(swp_entry_t *entry)
{
	*entry = swp_entry(SWP_DEVICE_READ, swp_offset(*entry));
}

static inline bool is_write_device_private_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
}

static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
{
	return swp_offset(entry);
}

static inline struct page *device_private_entry_to_page(swp_entry_t entry)
{
	return pfn_to_page(swp_offset(entry));
}

vm_fault_t device_private_entry_fault(struct vm_area_struct *vma,
		       unsigned long addr,
		       swp_entry_t entry,
		       unsigned int flags,
		       pmd_t *pmdp);
#else /* CONFIG_DEVICE_PRIVATE */
static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
{
	return swp_entry(0, 0);
}

static inline void make_device_private_entry_read(swp_entry_t *entry)
{
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
	return false;
}

static inline bool is_write_device_private_entry(swp_entry_t entry)
{
	return false;
}

static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
{
	return 0;
}

static inline struct page *device_private_entry_to_page(swp_entry_t entry)
{
	return NULL;
}

static inline vm_fault_t device_private_entry_fault(struct vm_area_struct *vma,
		     unsigned long addr,
		     swp_entry_t entry,
		     unsigned int flags,
		     pmd_t *pmdp)
{
	return VM_FAULT_SIGBUS;
}
#endif /* CONFIG_DEVICE_PRIVATE */
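
/*
 * Rough usage sketch (simplified; the real callers live in the HMM /
 * migrate_vma code, not in this header): a driver that has moved a page
 * into device-private memory installs one of these entries in place of
 * the CPU pte, and the CPU fault path decodes it again:
 *
 *	entry = make_device_private_entry(page, vma->vm_flags & VM_WRITE);
 *	set_pte_at(mm, addr, ptep, swp_entry_to_pte(entry));
 *	...
 *	// later, in the fault path:
 *	if (is_device_private_entry(entry))
 *		page = device_private_entry_to_page(entry);
 */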

#ifdef CONFIG_MIGRATION
static inline swp_entry_t make_migration_entry(struct page *page, int write)
{
	BUG_ON(!PageLocked(compound_head(page)));

	return swp_entry(write ? SWP_MIGRATION_WRITE : SWP_MIGRATION_READ,
			page_to_pfn(page));
}

static inline int is_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_READ ||
			swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline int is_write_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
{
	return swp_offset(entry);
}

static inline struct page *migration_entry_to_page(swp_entry_t entry)
{
	struct page *p = pfn_to_page(swp_offset(entry));
	/*
	 * Any use of migration entries may only occur while the
	 * corresponding page is locked
	 */
	BUG_ON(!PageLocked(compound_head(p)));
	return p;
}

static inline void make_migration_entry_read(swp_entry_t *entry)
{
	*entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry));
}

extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
					spinlock_t *ptl);
extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
					unsigned long address);
extern void migration_entry_wait_huge(struct vm_area_struct *vma,
		struct mm_struct *mm, pte_t *pte);
#else

#define make_migration_entry(page, write) swp_entry(0, 0)
static inline int is_migration_entry(swp_entry_t swp)
{
	return 0;
}

static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
{
	return 0;
}

static inline struct page *migration_entry_to_page(swp_entry_t entry)
{
	return NULL;
}

static inline void make_migration_entry_read(swp_entry_t *entryp) { }
static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
					spinlock_t *ptl) { }
static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
					 unsigned long address) { }
static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
		struct mm_struct *mm, pte_t *pte) { }
static inline int is_write_migration_entry(swp_entry_t entry)
{
	return 0;
}

#endif
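
/*
 * Sketch of how the migration helpers fit together (condensed from the
 * callers in mm/migrate.c and mm/memory.c): unmapping replaces each pte
 * with a migration entry while the page is locked, and a racing fault
 * simply waits for migration to finish:
 *
 *	entry = make_migration_entry(page, pte_write(pteval));
 *	set_pte_at(mm, address, ptep, swp_entry_to_pte(entry));
 *	...
 *	// fault path:
 *	if (unlikely(non_swap_entry(entry)) && is_migration_entry(entry))
 *		migration_entry_wait(mm, pmd, address);
 */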

struct page_vma_mapped_walk;

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
extern void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page);

extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
		struct page *new);

extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd);

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	swp_entry_t arch_entry;

	if (pmd_swp_soft_dirty(pmd))
		pmd = pmd_swp_clear_soft_dirty(pmd);
	arch_entry = __pmd_to_swp_entry(pmd);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pmd(arch_entry);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
	return !pmd_present(pmd) && is_migration_entry(pmd_to_swp_entry(pmd));
}
#else
static inline void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page)
{
	BUILD_BUG();
}

static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
		struct page *new)
{
	BUILD_BUG();
}

static inline void pmd_migration_entry_wait(struct mm_struct *m, pmd_t *p) { }

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	return swp_entry(0, 0);
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	return __pmd(0);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
	return 0;
}
#endif
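
/*
 * The pmd-level entries mirror the pte helpers above, e.g. (an
 * illustration, not from this header):
 *
 *	if (is_pmd_migration_entry(*pmd))
 *		pmd_migration_entry_wait(mm, pmd);
 *
 * The BUILD_BUG() stubs make any reachable call to the pvmw helpers a
 * compile-time error when the architecture does not select
 * ARCH_ENABLE_THP_MIGRATION.
 */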

#ifdef CONFIG_MEMORY_FAILURE

extern atomic_long_t num_poisoned_pages __read_mostly;

/*
 * Support for hardware poisoned pages
 */
static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	BUG_ON(!PageLocked(page));
	return swp_entry(SWP_HWPOISON, page_to_pfn(page));
}

static inline int is_hwpoison_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_HWPOISON;
}

static inline void num_poisoned_pages_inc(void)
{
	atomic_long_inc(&num_poisoned_pages);
}

static inline void num_poisoned_pages_dec(void)
{
	atomic_long_dec(&num_poisoned_pages);
}

#else

static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	return swp_entry(0, 0);
}

static inline int is_hwpoison_entry(swp_entry_t swp)
{
	return 0;
}

static inline void num_poisoned_pages_inc(void)
{
}
#endif
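
/*
 * Usage note (a sketch of the callers, which live in mm/memory-failure.c
 * and the fault path): when a page is poisoned, its user mappings are
 * replaced with hwpoison entries so that any later access faults;
 * do_swap_page() then sees is_hwpoison_entry() and returns
 * VM_FAULT_HWPOISON instead of touching the page.
 */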

#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION)
static inline int non_swap_entry(swp_entry_t entry)
{
	return swp_type(entry) >= MAX_SWAPFILES;
}
#else
static inline int non_swap_entry(swp_entry_t entry)
{
	return 0;
}
#endif
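
/*
 * Why a single comparison suffices (assuming the type allocation in
 * linux/swap.h): the special types used above -- SWP_MIGRATION_*,
 * SWP_DEVICE_* and SWP_HWPOISON -- are carved out of the top of the
 * five-bit type space, at values >= MAX_SWAPFILES, so anything at or
 * above that bound cannot be a real swap file.
 */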

#endif /* _LINUX_SWAPOPS_H */