/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include <asm/cacheflush.h>

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif
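
/*
 * The stubs above are no-ops for architectures whose caches need no
 * explicit maintenance here; an architecture that does need it defines
 * ARCH_HAS_FLUSH_ANON_PAGE / ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE and
 * provides real implementations in its own headers.
 */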

#include <asm/kmap_types.h>

#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>

/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
extern atomic_long_t _totalhigh_pages;
static inline unsigned long totalhigh_pages(void)
{
	return (unsigned long)atomic_long_read(&_totalhigh_pages);
}

static inline void totalhigh_pages_inc(void)
{
	atomic_long_inc(&_totalhigh_pages);
}

static inline void totalhigh_pages_dec(void)
{
	atomic_long_dec(&_totalhigh_pages);
}

static inline void totalhigh_pages_add(long count)
{
	atomic_long_add(count, &_totalhigh_pages);
}

static inline void totalhigh_pages_set(long val)
{
	atomic_long_set(&_totalhigh_pages, val);
}
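
/*
 * Because the counter is an atomic_long_t, callers can adjust it without
 * taking a lock. A minimal sketch of a hypothetical caller accounting
 * newly onlined highmem:
 *
 *	totalhigh_pages_add(nr_pages);
 *	pr_info("highmem: %lu pages\n", totalhigh_pages());
 */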

void kmap_flush_unused(void);

struct page *kmap_to_page(void *addr);

#else /* CONFIG_HIGHMEM */

static inline unsigned int nr_free_highpages(void) { return 0; }

static inline struct page *kmap_to_page(void *addr)
{
	return virt_to_page(addr);
}

static inline unsigned long totalhigh_pages(void) { return 0UL; }

#ifndef ARCH_HAS_KMAP
static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

static inline void kunmap(struct page *page)
{
}

static inline void *kmap_atomic(struct page *page)
{
	preempt_disable();
	pagefault_disable();
	return page_address(page);
}
#define kmap_atomic_prot(page, prot)	kmap_atomic(page)

static inline void __kunmap_atomic(void *addr)
{
	pagefault_enable();
	preempt_enable();
}
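
/*
 * Without CONFIG_HIGHMEM every page is permanently mapped, so
 * kmap_atomic() can simply return page_address(). Preemption and page
 * faults are still disabled so that code written against the highmem
 * semantics (no sleeping between map and unmap) behaves identically on
 * all configurations.
 */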

#define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))

#define kmap_flush_unused()	do {} while (0)
#endif

#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)

DECLARE_PER_CPU(int, __kmap_atomic_idx);

static inline int kmap_atomic_idx_push(void)
{
	int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;

#ifdef CONFIG_DEBUG_HIGHMEM
	WARN_ON_ONCE(in_irq() && !irqs_disabled());
	BUG_ON(idx >= KM_TYPE_NR);
#endif
	return idx;
}

static inline int kmap_atomic_idx(void)
{
	return __this_cpu_read(__kmap_atomic_idx) - 1;
}

static inline void kmap_atomic_idx_pop(void)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	int idx = __this_cpu_dec_return(__kmap_atomic_idx);

	BUG_ON(idx < 0);
#else
	__this_cpu_dec(__kmap_atomic_idx);
#endif
}
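
/*
 * These helpers keep a small per-CPU stack index for atomic kmap slots:
 * an architecture's kmap_atomic() pushes an index to pick a fixmap slot
 * and the matching kunmap_atomic() pops it. A rough sketch of the usual
 * pattern (details vary per architecture):
 *
 *	type = kmap_atomic_idx_push();
 *	idx  = type + KM_TYPE_NR * smp_processor_id();
 *	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 *	set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));
 *	...
 *	kmap_atomic_idx_pop();
 */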

/*
 * Prevent people trying to call kunmap_atomic() as if it were kunmap():
 * kunmap_atomic() should get the return value of kmap_atomic(), not the page.
 */
#define kunmap_atomic(addr)                                     \
do {                                                            \
	BUILD_BUG_ON(__same_type((addr), struct page *));       \
	__kunmap_atomic(addr);                                  \
} while (0)
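
/*
 * Typical pairing (sketch): map, touch the page, unmap with the returned
 * address, with no sleeping in between:
 *
 *	void *vaddr = kmap_atomic(page);
 *	memcpy(vaddr + offset, buf, len);
 *	kunmap_atomic(vaddr);
 */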

/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_atomic(page);
	clear_user_page(addr, vaddr, page);
	kunmap_atomic(addr);
}
#endif

#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/**
 * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
 * @movableflags: The GFP flags related to the page's future ability to move, like __GFP_MOVABLE
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA, but the caller is expected
 * to specify via movableflags whether the page will be movable in the
 * future or not.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing its own
 * implementation.
 */
static inline struct page *
__alloc_zeroed_user_highpage(gfp_t movableflags,
			struct vm_area_struct *vma,
			unsigned long vaddr)
{
	struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
			vma, vaddr);

	if (page)
		clear_user_highpage(page, vaddr);

	return page;
}
#endif

/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows can
 * be migrated in the future using move_pages() or reclaimed.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
			unsigned long vaddr)
{
	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
}
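
/*
 * Example use (sketch): an anonymous-fault path allocates the page with
 * the movable flag so it can later be compacted or migrated, e.g.
 *
 *	page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
 *	if (!page)
 *		return VM_FAULT_OOM;
 */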

static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page);
	clear_page(kaddr);
	kunmap_atomic(kaddr);
}

static inline void zero_user_segments(struct page *page,
	unsigned start1, unsigned end1,
	unsigned start2, unsigned end2)
{
	void *kaddr = kmap_atomic(page);

	BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_atomic(kaddr);
	flush_dcache_page(page);
}

static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
	unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}
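
/*
 * Example (sketch): filesystems commonly use these helpers to zero the
 * part of a page that a short write did not cover before marking it
 * uptodate:
 *
 *	zero_user_segment(page, bytes_written, PAGE_SIZE);
 */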

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}

#endif
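
/*
 * copy_user_page() is handed the user virtual address and the destination
 * page so that architectures with virtually indexed caches can avoid
 * aliasing problems; on most architectures it is simply copy_page().
 */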

#ifndef __HAVE_ARCH_COPY_HIGHPAGE

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_page(vto, vfrom);
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}

#endif

#endif /* _LINUX_HIGHMEM_H */