/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

struct pagevec;

/*
 * Bits in mapping->flags.
 */
enum mapping_flags {
	AS_EIO		= 0,	/* IO error on async write */
	AS_ENOSPC	= 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_EXITING	= 4,	/* final truncate in progress */
	/* writeback related tags are not used */
	AS_NO_WRITEBACK_TAGS = 5,
};
/**
 * mapping_set_error - record a writeback error in the address_space
 * @mapping: the mapping in which an error should be set
 * @error: the error to set in the mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called.  We endeavor
 * to report errors on any file that was open at the time of the error.  Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * mapping_set_error to record the error in the mapping so that it can be
 * reported when the application calls fsync(2).
 */
static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (likely(!error))
		return;

	/* Record in wb_err for checkers using errseq_t based tracking */
	filemap_set_wb_err(mapping, error);

	/* Record it in flags for now, for legacy callers */
	if (error == -ENOSPC)
		set_bit(AS_ENOSPC, &mapping->flags);
	else
		set_bit(AS_EIO, &mapping->flags);
}
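
/*
 * Example (illustrative sketch, not part of this header): a filesystem's
 * writeback completion path would typically record a failed write like the
 * hypothetical helper below.  Only mapping_set_error() and
 * end_page_writeback() are taken from this file; the surrounding function
 * and its error argument are assumptions for illustration.
 *
 *	static void example_end_writeback(struct page *page, int err)
 *	{
 *		if (err)
 *			mapping_set_error(page->mapping, err);
 *		end_page_writeback(page);
 *	}
 */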

static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
	if (mapping)
		return test_bit(AS_UNEVICTABLE, &mapping->flags);
	return !!mapping;
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
	set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
	return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return mapping->gfp_mask;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
		gfp_t gfp_mask)
{
	return mapping_gfp_mask(mapping) & gfp_mask;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->gfp_mask = mask;
}
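
/*
 * Example (illustrative sketch): a filesystem that must never recurse into
 * filesystem reclaim from page cache allocations for a given inode typically
 * restricts the mapping's mask once, while the inode is being set up:
 *
 *	mapping_set_gfp_mask(inode->i_mapping,
 *			mapping_gfp_constraint(inode->i_mapping, GFP_NOFS));
 *
 * Allocations that later go through mapping_gfp_mask() then inherit that
 * restriction automatically.
 */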

void release_pages(struct page **pages, int nr);

/*
 * Speculatively take a reference to a page.
 * If the page is free (_refcount == 0), then _refcount is untouched, and 0
 * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (e.g. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _refcount (e.g. reclaim) has the
 * following (with tree_lock held for write):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using tree_lock could equally have run before or after
 * such a re-insertion, depending on the order in which locks are granted.
 *
 * Lookups racing against pagecache insertion aren't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int page_cache_get_speculative(struct page *page)
{
#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_inc(page);

#else
	if (unlikely(!get_page_unless_zero(page))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON_PAGE(PageTail(page), page);

	return 1;
}
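
/*
 * Example (simplified sketch of the lookup pattern described above; a real
 * lookup such as find_get_entry() also has to revalidate the radix-tree slot
 * and cope with exceptional entries).  Retry if the speculative get fails or
 * if the page was replaced under us:
 *
 *	rcu_read_lock();
 * repeat:
 *	page = radix_tree_lookup(&mapping->page_tree, offset);
 *	if (page) {
 *		if (!page_cache_get_speculative(page))
 *			goto repeat;
 *		if (page != radix_tree_lookup(&mapping->page_tree, offset)) {
 *			put_page(page);
 *			goto repeat;
 *		}
 *	}
 *	rcu_read_unlock();
 */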

/*
 * Same as above, but add instead of inc (could just be merged)
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_add(page, count);

#else
	if (unlikely(!page_ref_add_unless(page, count, 0)))
		return 0;
#endif
	VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);

	return 1;
}

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
	return mapping_gfp_mask(x) |
		__GFP_COLD | __GFP_NORETRY | __GFP_NOWARN;
}

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED		0x00000001
#define FGP_LOCK		0x00000002
#define FGP_CREAT		0x00000004
#define FGP_WRITE		0x00000008
#define FGP_NOFS		0x00000010
#define FGP_NOWAIT		0x00000020

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
		int fgp_flags, gfp_t cache_gfp_mask);

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, 0, 0);
}
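
/*
 * Example (illustrative sketch): a caller that only needs to peek at an
 * already-cached page pairs find_get_page() with put_page():
 *
 *	page = find_get_page(mapping, index);
 *	if (page) {
 *		... inspect or copy from the page ...
 *		put_page(page);
 *	} else {
 *		... page not cached, fall back to I/O ...
 *	}
 */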

static inline struct page *find_get_page_flags(struct address_space *mapping,
					pgoff_t offset, int fgp_flags)
{
	return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_page() may sleep.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @offset: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t offset, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, offset,
					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
					gfp_mask);
}
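
/*
 * Example (illustrative sketch): find_or_create_page() hands back a locked,
 * referenced page, so a typical caller both unlocks it and drops the
 * reference when done:
 *
 *	page = find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
 *	if (!page)
 *		return -ENOMEM;
 *	... fill or modify the locked page ...
 *	unlock_page(page);
 *	put_page(page);
 */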

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			mapping_gfp_mask(mapping));
}

struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
			  unsigned int nr_entries, struct page **entries,
			  pgoff_t *indices);
unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
			pgoff_t end, unsigned int nr_pages,
			struct page **pages);
static inline unsigned find_get_pages(struct address_space *mapping,
			pgoff_t *start, unsigned int nr_pages,
			struct page **pages)
{
	return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages,
				    pages);
}
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
			pgoff_t end, int tag, unsigned int nr_pages,
			struct page **pages);
static inline unsigned find_get_pages_tag(struct address_space *mapping,
			pgoff_t *index, int tag, unsigned int nr_pages,
			struct page **pages)
{
	return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
					nr_pages, pages);
}
unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
			int tag, unsigned int nr_entries,
			struct page **entries, pgoff_t *indices);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
							pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page *read_cache_page(struct address_space *mapping,
				pgoff_t index, filler_t *filler, void *data);
extern struct page *read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
			struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}

/*
 * Get the index of the page within the radix tree.
 * (TODO: remove once hugetlb pages have ->index in PAGE_SIZE units)
 */
static inline pgoff_t page_to_index(struct page *page)
{
	pgoff_t pgoff;

	if (likely(!PageTransTail(page)))
		return page->index;

	/*
	 * We don't initialize ->index for tail pages: calculate based on
	 * head page
	 */
	pgoff = compound_head(page)->index;
	pgoff += page - compound_head(page);
	return pgoff;
}

/*
 * Get the file offset of the page, in units of PAGE_SIZE.
 * (TODO: hugepages should have ->index in PAGE_SIZE units)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
	if (unlikely(PageHeadHuge(page)))
		return page->index << compound_order(page);

	return page_to_index(page);
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_index(page)) << PAGE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff;
}

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
				unsigned int flags);
extern void unlock_page(struct page *page);

static inline int trylock_page(struct page *page)
{
	page = compound_head(page);
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_sem implications depend on flags; see
 * __lock_page_or_retry().
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}
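
/*
 * Example (illustrative sketch of a fault-handler style caller; the vmf
 * fields follow struct vm_fault, but the surrounding code is hypothetical).
 * A zero return means the lock was not taken and, depending on the flags,
 * mmap_sem may already have been dropped on the caller's behalf:
 *
 *	if (!lock_page_or_retry(page, vmf->vma->vm_mm, vmf->flags)) {
 *		put_page(page);
 *		return VM_FAULT_RETRY;
 *	}
 */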

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc.,
 * and should not be used directly.
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable(struct page *page, int bit_nr);

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with an increased "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(compound_head(page), PG_locked);
}
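
/*
 * Example (illustrative sketch): a caller that does not already own a
 * reference takes one around the wait so the page cannot be freed under it:
 *
 *	get_page(page);
 *	wait_on_page_locked(page);
 *	put_page(page);
 */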

static inline int wait_on_page_locked_killable(struct page *page)
{
	if (!PageLocked(page))
		return 0;
	return wait_on_page_bit_killable(compound_head(page), PG_locked);
}

/*
 * Wait for a page to complete writeback
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

void page_endio(struct page *page, bool is_write, int err);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);

/*
 * Fault in everything in the given userspace address range.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;
	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	do {
		if (unlikely(__put_user(0, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
	    ((unsigned long)end & PAGE_MASK))
		return __put_user(0, end);

	return 0;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	const char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;

	do {
		if (unlikely(__get_user(c, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
	    ((unsigned long)end & PAGE_MASK)) {
		return __get_user(c, end);
	}

	(void)c;
	return 0;
}
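
/*
 * Example (illustrative sketch): write paths commonly prefault the user
 * buffer before taking page locks, so that a later copy from userspace is
 * unlikely to fault while locks are held.  The buf/count names below are
 * placeholders:
 *
 *	if (unlikely(fault_in_pages_readable(buf, count)))
 *		return -EFAULT;
 *	... now lock the destination pages and copy from buf ...
 */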

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
void delete_from_page_cache_batch(struct address_space *mapping,
				  struct pagevec *pvec);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __SetPageLocked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__SetPageLocked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__ClearPageLocked(page);
	return error;
}
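
/*
 * Example (illustrative sketch): readahead-style callers allocate a fresh
 * page, try to insert it, and drop it again if insertion fails (for example
 * because another task already cached that index):
 *
 *	page = __page_cache_alloc(readahead_gfp_mask(mapping));
 *	if (!page)
 *		return -ENOMEM;
 *	if (add_to_page_cache_lru(page, mapping, index,
 *				  mapping_gfp_constraint(mapping, GFP_KERNEL))) {
 *		put_page(page);
 *		return 0;
 *	}
 *	... page is now locked and in the page cache ...
 */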

static inline unsigned long dir_pages(struct inode *inode)
{
	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
			       PAGE_SHIFT;
}

#endif /* _LINUX_PAGEMAP_H */