/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 * Derived from include/asm-i386/pgtable.h
 * Licensed under the GPL
 */

#ifndef __UM_PGTABLE_H
#define __UM_PGTABLE_H

#include <asm/fixmap.h>

#define _PAGE_PRESENT	0x001
#define _PAGE_NEWPAGE	0x002
#define _PAGE_NEWPROT	0x004
#define _PAGE_RW	0x020
#define _PAGE_USER	0x040
#define _PAGE_ACCESSED	0x080
#define _PAGE_DIRTY	0x100
/* If _PAGE_PRESENT is clear, we use these: */
#define _PAGE_PROTNONE	0x010	/* if the user mapped it with PROT_NONE;
				   pte_present gives true */

#ifdef CONFIG_3_LEVEL_PGTABLES
#include <asm/pgtable-3level.h>
#else
#include <asm/pgtable-2level.h>
#endif

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* zero page used for uninitialized stuff */
extern unsigned long *empty_zero_page;

#define pgtable_cache_init() do ; while (0)

/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts. That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */

extern unsigned long end_iomem;

#define VMALLOC_OFFSET	(__va_space)
#define VMALLOC_START	((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define PKMAP_BASE	((FIXADDR_START - LAST_PKMAP * PAGE_SIZE) & PMD_MASK)
#define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)
#define MODULES_VADDR	VMALLOC_START
#define MODULES_END	VMALLOC_END
#define MODULES_LEN	(MODULES_END - MODULES_VADDR)
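
/*
 * Worked example (illustrative only, with hypothetical values): with
 * end_iomem == 0x20500000 and __va_space == 0x800000, VMALLOC_START
 * rounds the end of the iomem region up to the next VMALLOC_OFFSET
 * boundary:
 *
 *	(0x20500000 + 0x800000) & ~(0x800000 - 1) == 0x20800000
 *
 * which leaves the "hole" described above between physical memory and
 * the vmalloc area.
 */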

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define __PAGE_KERNEL_EXEC \
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_KERNEL_EXEC	__pgprot(__PAGE_KERNEL_EXEC)

/*
 * The i386 can't do separate page protection for execute, and
 * considers execute permission the same as read permission. Also,
 * write permissions imply read permissions. This is the closest we
 * can get..
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED
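
/*
 * Example (illustrative only): the digits in __Pxwr/__Sxwr are the
 * PROT_EXEC/PROT_WRITE/PROT_READ bits of the mmap() request. A private
 * PROT_READ | PROT_EXEC mapping indexes __P101 and, since execute
 * cannot be granted separately here, gets PAGE_READONLY; a shared
 * PROT_READ | PROT_WRITE mapping indexes __S011 and gets PAGE_SHARED.
 */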

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)

#define pte_clear(mm,addr,xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE))

#define pmd_none(x)	(!((unsigned long)pmd_val(x) & ~_PAGE_NEWPAGE))
#define pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)

#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp)	do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0)

#define pmd_newpage(x)	(pmd_val(x) & _PAGE_NEWPAGE)
#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEWPAGE)

#define pud_newpage(x)	(pud_val(x) & _PAGE_NEWPAGE)
#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEWPAGE)

#define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)

#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pte_present(x)	pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))

/*
 * =================================
 * Flags checking section.
 * =================================
 */

static inline int pte_none(pte_t pte)
{
	return pte_is_zero(pte);
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_read(pte_t pte)
{
	return((pte_get_bits(pte, _PAGE_USER)) &&
	       !(pte_get_bits(pte, _PAGE_PROTNONE)));
}

static inline int pte_exec(pte_t pte)
{
	return((pte_get_bits(pte, _PAGE_USER)) &&
	       !(pte_get_bits(pte, _PAGE_PROTNONE)));
}

static inline int pte_write(pte_t pte)
{
	return((pte_get_bits(pte, _PAGE_RW)) &&
	       !(pte_get_bits(pte, _PAGE_PROTNONE)));
}

static inline int pte_dirty(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_DIRTY);
}

static inline int pte_young(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_ACCESSED);
}

static inline int pte_newpage(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_NEWPAGE);
}

static inline int pte_newprot(pte_t pte)
{
	return(pte_present(pte) && (pte_get_bits(pte, _PAGE_NEWPROT)));
}

static inline int pte_special(pte_t pte)
{
	return 0;
}
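
/*
 * Usage sketch (illustrative only): the checks above are only valid
 * for present ptes, so a caller scanning for pages that may need
 * writing back would do something like
 *
 *	if (pte_present(pte) && pte_dirty(pte))
 *		...
 */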

/*
 * =================================
 * Flags setting section.
 * =================================
 */

static inline pte_t pte_mknewprot(pte_t pte)
{
	pte_set_bits(pte, _PAGE_NEWPROT);
	return(pte);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_DIRTY);
	return(pte);
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_ACCESSED);
	return(pte);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	if (likely(pte_get_bits(pte, _PAGE_RW)))
		pte_clear_bits(pte, _PAGE_RW);
	else
		return pte;
	return(pte_mknewprot(pte));
}

static inline pte_t pte_mkread(pte_t pte)
{
	if (unlikely(pte_get_bits(pte, _PAGE_USER)))
		return pte;
	pte_set_bits(pte, _PAGE_USER);
	return(pte_mknewprot(pte));
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_set_bits(pte, _PAGE_DIRTY);
	return(pte);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_set_bits(pte, _PAGE_ACCESSED);
	return(pte);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	if (unlikely(pte_get_bits(pte, _PAGE_RW)))
		return pte;
	pte_set_bits(pte, _PAGE_RW);
	return(pte_mknewprot(pte));
}

static inline pte_t pte_mkuptodate(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_NEWPAGE);
	if(pte_present(pte))
		pte_clear_bits(pte, _PAGE_NEWPROT);
	return(pte);
}

static inline pte_t pte_mknewpage(pte_t pte)
{
	pte_set_bits(pte, _PAGE_NEWPAGE);
	return(pte);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return(pte);
}

static inline void set_pte(pte_t *pteptr, pte_t pteval)
{
	pte_copy(*pteptr, pteval);

	/* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so
	 * fix_range knows to unmap it. _PAGE_NEWPROT is specific to
	 * mapped pages.
	 */

	*pteptr = pte_mknewpage(*pteptr);
	if(pte_present(*pteptr)) *pteptr = pte_mknewprot(*pteptr);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *pteptr, pte_t pteval)
{
	set_pte(pteptr, pteval);
}
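
/*
 * Illustrative sketch (hypothetical caller): after
 *
 *	set_pte_at(mm, addr, ptep, mk_pte(page, PAGE_READONLY));
 *
 * pte_newpage(*ptep) is true (and pte_newprot(*ptep) as well, since
 * the pte is present), so the tlb code knows the host mapping is
 * stale until it syncs it and clears the bits via pte_mkuptodate().
 */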

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return !((pte_val(pte_a) ^ pte_val(pte_b)) & ~_PAGE_NEWPAGE);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
#define __virt_to_page(virt) phys_to_page(__pa(virt))
#define page_to_phys(page) pfn_to_phys(page_to_pfn(page))
#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)

#define mk_pte(page, pgprot) \
	({ pte_t pte;					\
							\
	pte_set_val(pte, page_to_phys(page), (pgprot));	\
	if (pte_present(pte))				\
		pte_mknewprot(pte_mknewpage(pte));	\
	pte;})

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot);
	return pte;
}
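
/*
 * Example (illustrative only): pte_modify() keeps the page frame and
 * the bits in _PAGE_CHG_MASK (accessed/dirty) but replaces the
 * protection, so write-protecting an existing pte could look like
 *
 *	pte = pte_modify(pte, PAGE_READONLY);
 */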

/*
 * The pgd page can be thought of as an array like this:
 * pgd_t[PTRS_PER_PGD]
 *
 * This macro returns the index of the entry in the pgd page which would
 * control the given virtual address.
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))

/*
 * A shortcut which implies the use of the kernel's pgd, instead
 * of a process's.
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
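
/*
 * Worked example (hypothetical 2-level configuration with
 * PGDIR_SHIFT == 22 and PTRS_PER_PGD == 1024):
 *
 *	pgd_index(0x08048000) == (0x08048000 >> 22) & 1023 == 32
 *
 * so entry 32 of the pgd page controls that address.
 */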

/*
 * The pmd page can be thought of as an array like this:
 * pmd_t[PTRS_PER_PMD]
 *
 * This macro returns the index of the entry in the pmd page which would
 * control the given virtual address.
 */
#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/*
 * The pte page can be thought of as an array like this:
 * pte_t[PTRS_PER_PTE]
 *
 * This macro returns the index of the entry in the pte page which would
 * control the given virtual address.
 */
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) \
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
#define pte_offset_map(dir, address) \
	((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
#define pte_unmap(pte) do { } while (0)

struct mm_struct;
extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
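
/*
 * Putting the levels together (illustrative sketch using the classic
 * folding helpers; virt_to_pte() above does this for real, with the
 * pgd/pud/pmd presence checks that are omitted here):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 */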

#define update_mmu_cache(vma,address,ptep) do ; while (0)

/* Encode and de-code a swap entry */
#define __swp_type(x)			(((x).val >> 5) & 0x1f)
#define __swp_offset(x)			((x).val >> 11)

#define __swp_entry(type, offset) \
	((swp_entry_t) { ((type) << 5) | ((offset) << 11) })
#define __pte_to_swp_entry(pte) \
	((swp_entry_t) { pte_val(pte_mkuptodate(pte)) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
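
/*
 * Illustrative layout: the 5-bit swap type lives in bits 5-9 and the
 * offset in bits 11 and up, keeping _PAGE_PRESENT (bit 0) clear and
 * leaving room for the other software bits. For example,
 *
 *	__swp_entry(2, 3).val == (2 << 5) | (3 << 11) == 0x1840
 */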

#define kern_addr_valid(addr) (1)

#include <asm-generic/pgtable.h>

/* Clear a kernel PTE and flush it from the TLB */
#define kpte_clear_flush(ptep, vaddr)		\
do {						\
	pte_clear(&init_mm, (vaddr), (ptep));	\
	__flush_tlb_one((vaddr));		\
} while (0)

#endif