/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

struct pagevec;

/*
 * Bits in mapping->flags.
 */
enum mapping_flags {
	AS_EIO		= 0,	/* IO error on async write */
	AS_ENOSPC	= 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_EXITING	= 4,	/* final truncate in progress */
	/* writeback related tags are not used */
	AS_NO_WRITEBACK_TAGS = 5,
};
/**
 * mapping_set_error - record a writeback error in the address_space
 * @mapping: the mapping in which an error should be set
 * @error: the error to set in the mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called.  We endeavor
 * to report errors on any file that was open at the time of the error.  Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * mapping_set_error to record the error in the mapping so that it can be
 * reported when the application calls fsync(2).
 */
static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (likely(!error))
		return;

	/* Record in wb_err for checkers using errseq_t based tracking */
	filemap_set_wb_err(mapping, error);

	/* Record it in flags for now, for legacy callers */
	if (error == -ENOSPC)
		set_bit(AS_ENOSPC, &mapping->flags);
	else
		set_bit(AS_EIO, &mapping->flags);
}
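
/*
 * Illustrative sketch (not part of this header): a filesystem's write-side
 * completion path might record a failure like this, so that a later fsync(2)
 * on any open file sees the error.  my_fs_writepage() and my_fs_do_io() are
 * hypothetical names used only for the example.
 *
 *	static int my_fs_writepage(struct page *page,
 *				   struct writeback_control *wbc)
 *	{
 *		int err = my_fs_do_io(page);	(hypothetical I/O submission)
 *
 *		if (unlikely(err))
 *			mapping_set_error(page->mapping, err);
 *		return err;
 *	}
 */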

static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
	if (mapping)
		return test_bit(AS_UNEVICTABLE, &mapping->flags);
	return !!mapping;
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
	set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
	return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return mapping->gfp_mask;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
		gfp_t gfp_mask)
{
	return mapping_gfp_mask(mapping) & gfp_mask;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->gfp_mask = mask;
}
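
/*
 * Illustrative sketch (not part of this header): filesystems typically call
 * mapping_set_gfp_mask() while setting up an inode, before the mapping is
 * visible to anyone else, e.g. to keep page cache allocations from recursing
 * into the filesystem.  my_fs_fill_inode() is a hypothetical name; the GFP
 * adjustment itself is the common pattern.
 *
 *	static void my_fs_fill_inode(struct inode *inode)
 *	{
 *		...
 *		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
 *		...
 *	}
 */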

void release_pages(struct page **pages, int nr);

/*
 * Speculatively take a reference to a page.
 * If the page is free (_refcount == 0), then _refcount is untouched, and 0
 * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (e.g. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _refcount (e.g. reclaim) has the
 * following (with the i_pages lock held):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using a lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion aren't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int __page_cache_add_speculative(struct page *page, int count)
{
#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_add(page, count);

#else
	if (unlikely(!page_ref_add_unless(page, count, 0))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON_PAGE(PageTail(page), page);

	return 1;
}

static inline int page_cache_get_speculative(struct page *page)
{
	return __page_cache_add_speculative(page, 1);
}

static inline int page_cache_add_speculative(struct page *page, int count)
{
	return __page_cache_add_speculative(page, count);
}
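
/*
 * Illustrative sketch (not part of this header): a minimal lockless lookup
 * following the 1-2-3 pattern described above, assuming the mapping stores
 * its pages in mapping->i_pages (an XArray).  This is a simplified version
 * of what find_get_entry() does; it treats shadow (value) entries as misses
 * and ignores multi-order entries.  my_lockless_lookup() is a hypothetical
 * name.
 *
 *	static struct page *my_lockless_lookup(struct address_space *mapping,
 *					       pgoff_t index)
 *	{
 *		struct page *page;
 *
 *		rcu_read_lock();
 *	repeat:
 *		page = xa_load(&mapping->i_pages, index);	(step 1)
 *		if (xa_is_value(page))
 *			page = NULL;			(shadow entry)
 *		if (page) {
 *			if (!page_cache_get_speculative(page))	(step 2)
 *				goto repeat;
 *			(step 3: recheck the page is still in pagecache)
 *			if (page != xa_load(&mapping->i_pages, index)) {
 *				put_page(page);
 *				goto repeat;
 *			}
 *		}
 *		rcu_read_unlock();
 *		return page;
 *	}
 */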

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
	return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
}

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED		0x00000001
#define FGP_LOCK		0x00000002
#define FGP_CREAT		0x00000004
#define FGP_WRITE		0x00000008
#define FGP_NOFS		0x00000010
#define FGP_NOWAIT		0x00000020

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
		int fgp_flags, gfp_t cache_gfp_mask);
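
/*
 * Illustrative sketch (not part of this header): a write path that wants the
 * page locked, marked for write, and created if absent combines FGP flags in
 * the style of grab_cache_page_write_begin().  Error handling is the
 * caller's responsibility.
 *
 *	page = pagecache_get_page(mapping, index,
 *				  FGP_LOCK | FGP_WRITE | FGP_CREAT,
 *				  mapping_gfp_mask(mapping));
 *	if (!page)
 *		return -ENOMEM;
 *	... use the locked page ...
 *	unlock_page(page);
 *	put_page(page);
 */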

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, 0, 0);
}

static inline struct page *find_get_page_flags(struct address_space *mapping,
					pgoff_t offset, int fgp_flags)
{
	return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_page() may sleep.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @offset: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t offset, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, offset,
					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
					gfp_mask);
}

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			mapping_gfp_mask(mapping));
}

struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
			  unsigned int nr_entries, struct page **entries,
			  pgoff_t *indices);
unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
			pgoff_t end, unsigned int nr_pages,
			struct page **pages);
static inline unsigned find_get_pages(struct address_space *mapping,
			pgoff_t *start, unsigned int nr_pages,
			struct page **pages)
{
	return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages,
				    pages);
}
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
			pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
			struct page **pages);
static inline unsigned find_get_pages_tag(struct address_space *mapping,
			pgoff_t *index, xa_mark_t tag, unsigned int nr_pages,
			struct page **pages)
{
	return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
					nr_pages, pages);
}
unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
			xa_mark_t tag, unsigned int nr_entries,
			struct page **entries, pgoff_t *indices);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
							pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page *read_cache_page(struct address_space *mapping,
				pgoff_t index, filler_t *filler, void *data);
extern struct page *read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}
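
/*
 * Illustrative sketch (not part of this header): reading one page of
 * directory or similar metadata through the page cache.  read_mapping_page()
 * returns an ERR_PTR() on failure, so the result must be checked with
 * IS_ERR() before use, and the page reference dropped when done.
 *
 *	struct page *page = read_mapping_page(inode->i_mapping, n, NULL);
 *
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	... examine page contents ...
 *	put_page(page);
 */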

/*
 * Get the index of the page within the radix tree.
 * (TODO: remove once hugetlb pages have ->index in PAGE_SIZE units)
 */
static inline pgoff_t page_to_index(struct page *page)
{
	pgoff_t pgoff;

	if (likely(!PageTransTail(page)))
		return page->index;

	/*
	 * We don't initialize ->index for tail pages: calculate based on
	 * head page
	 */
	pgoff = compound_head(page)->index;
	pgoff += page - compound_head(page);
	return pgoff;
}

/*
 * Get the page's offset into its file, in PAGE_SIZE units.
 * (TODO: hugepages should have ->index in PAGE_SIZE units)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
	if (unlikely(PageHeadHuge(page)))
		return page->index << compound_order(page);

	return page_to_index(page);
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_index(page)) << PAGE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff;
}
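
/*
 * Worked example (illustrative): for a VMA with vm_pgoff == 16, an address
 * of vm_start + 3 * PAGE_SIZE maps to file page index (3 + 16) == 19, i.e.
 * byte offset 19 << PAGE_SHIFT within the file.  A typical caller is a
 * fault handler:
 *
 *	pgoff_t pgoff = linear_page_index(vma, vmf->address);
 */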

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
				unsigned int flags);
extern void unlock_page(struct page *page);

static inline int trylock_page(struct page *page)
{
	page = compound_head(page);
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_sem implications depend on flags; see
 * __lock_page_or_retry().
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}
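
/*
 * Illustrative sketch (not part of this header): a fault path that can drop
 * mmap_sem and retry, in the style of do_swap_page().  If the lock was not
 * taken, mmap_sem may already have been released (depending on vmf->flags),
 * and the caller must bail out with VM_FAULT_RETRY.
 *
 *	if (!lock_page_or_retry(page, vmf->vma->vm_mm, vmf->flags)) {
 *		put_page(page);
 *		return VM_FAULT_RETRY;
 *	}
 *	... page is now locked ...
 */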

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc.,
 * and should not be used directly.
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable(struct page *page, int bit_nr);

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with an increased "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(compound_head(page), PG_locked);
}

static inline int wait_on_page_locked_killable(struct page *page)
{
	if (!PageLocked(page))
		return 0;
	return wait_on_page_bit_killable(compound_head(page), PG_locked);
}

extern void put_and_wait_on_page_locked(struct page *page);

/*
 * Wait for a page to complete writeback.
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);
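
/*
 * Illustrative sketch (not part of this header): a page_mkwrite()
 * implementation typically locks the page, revalidates it against the
 * mapping (it may have been truncated while unlocked), then calls
 * wait_for_stable_page() so the page is not modified while a device that
 * requires stable pages is writing it back.  This loosely mirrors
 * filemap_page_mkwrite().
 *
 *	lock_page(page);
 *	if (page->mapping != inode->i_mapping) {
 *		unlock_page(page);
 *		return VM_FAULT_NOPAGE;
 *	}
 *	wait_for_stable_page(page);
 *	return VM_FAULT_LOCKED;
 */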

void page_endio(struct page *page, bool is_write, int err);

/*
 * Add an arbitrary waiter to a page's wait queue.
 */
extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);

/*
 * Fault in the given range of userspace addresses.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;
	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	do {
		if (unlikely(__put_user(0, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK))
		return __put_user(0, end);

	return 0;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	const char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;

	do {
		if (unlikely(__get_user(c, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK)) {
		return __get_user(c, end);
	}

	(void)c;
	return 0;
}
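
/*
 * Illustrative sketch (not part of this header): buffered-write paths fault
 * user pages in *before* taking page locks, then perform the actual copy
 * with page faults disabled, retrying on a short copy.  This mirrors the
 * structure of generic_perform_write().  Faulting in is only best-effort,
 * since the pages can be reclaimed again before the copy happens.
 *
 *	if (unlikely(fault_in_pages_readable(buf, bytes)))
 *		return -EFAULT;
 *	... lock the destination page, copy with page faults disabled,
 *	    and retry from the top on a short copy ...
 */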

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
void delete_from_page_cache_batch(struct address_space *mapping,
				  struct pagevec *pvec);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __SetPageLocked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__SetPageLocked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__ClearPageLocked(page);
	return error;
}
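
/*
 * Illustrative sketch (not part of this header): the usual allocate-insert
 * pattern, as used by readahead.  On success the page is locked and carries
 * the cache's reference plus the caller's own; -EEXIST means somebody else
 * inserted a page at that index first.
 *
 *	struct page *page = __page_cache_alloc(gfp);
 *
 *	if (!page)
 *		return -ENOMEM;
 *	err = add_to_page_cache_lru(page, mapping, index, gfp);
 *	if (err) {
 *		put_page(page);
 *		return err;
 *	}
 *	... fill the page, then unlock_page(page) and drop the caller's
 *	    reference with put_page() ...
 */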

static inline unsigned long dir_pages(struct inode *inode)
{
	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
			       PAGE_SHIFT;
}

#endif /* _LINUX_PAGEMAP_H */