/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */
5
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched/signal.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include <as-layout.h>
#include <kern_util.h>
#include <mem_user.h>
#include <os.h>
#include <skas.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070017
/*
 * Batch of pending host VM operations.  Changes to the guest page
 * tables are queued here as mmap/munmap/mprotect requests so that
 * adjacent compatible requests can be merged and issued together.
 */
struct host_vm_change {
	struct host_vm_op {
		enum { NONE, MMAP, MUNMAP, MPROTECT } type;
		union {
			struct {
				unsigned long addr;
				unsigned long len;
				unsigned int prot;
				int fd;
				__u64 offset;
			} mmap;
			struct {
				unsigned long addr;
				unsigned long len;
			} munmap;
			struct {
				unsigned long addr;
				unsigned long len;
				unsigned int prot;
			} mprotect;
		} u;
	} ops[1];		/* op queue; ARRAY_SIZE(ops) is the batch limit */
	int userspace;		/* nonzero: ops target a userspace process,
				 * zero: they apply to the kernel mappings */
	int index;		/* number of entries currently queued in ops[] */
	struct mm_struct *mm;	/* address space the ops belong to */
	void *data;		/* opaque cursor passed through to map/unmap/protect */
	int force;		/* remap pages even when not marked as new */
};
46
/* Initializer for a host_vm_change batch: empty op queue, no flush data. */
#define INIT_HVC(mm, force, userspace) \
	((struct host_vm_change) \
	 { .ops		= { { .type = NONE } },	\
	   .mm		= mm, \
       	   .data	= NULL, \
	   .userspace	= userspace, \
	   .index	= 0, \
	   .force	= force })
55
/*
 * Complain about a failed host-side allocation; typically the host is
 * out of memory or vm.max_map_count has been exceeded.
 */
static void report_enomem(void)
{
	printk(KERN_ERR "UML ran out of memory on the host side! "
	       "This can happen due to a memory limitation or "
	       "vm.max_map_count has been reached.\n");
}
62
Jeff Dike1466abf2007-10-16 01:27:12 -070063static int do_ops(struct host_vm_change *hvc, int end,
64 int finished)
65{
66 struct host_vm_op *op;
67 int i, ret = 0;
68
69 for (i = 0; i < end && !ret; i++) {
70 op = &hvc->ops[i];
Jeff Dikec5d4bb12008-02-04 22:31:14 -080071 switch (op->type) {
Jeff Dike1466abf2007-10-16 01:27:12 -070072 case MMAP:
Anton Ivanova9c52c22018-12-05 12:37:39 +000073 if (hvc->userspace)
74 ret = map(&hvc->mm->context.id, op->u.mmap.addr,
75 op->u.mmap.len, op->u.mmap.prot,
76 op->u.mmap.fd,
77 op->u.mmap.offset, finished,
78 &hvc->data);
79 else
80 map_memory(op->u.mmap.addr, op->u.mmap.offset,
81 op->u.mmap.len, 1, 1, 1);
Jeff Dike1466abf2007-10-16 01:27:12 -070082 break;
83 case MUNMAP:
Anton Ivanova9c52c22018-12-05 12:37:39 +000084 if (hvc->userspace)
85 ret = unmap(&hvc->mm->context.id,
86 op->u.munmap.addr,
87 op->u.munmap.len, finished,
88 &hvc->data);
89 else
90 ret = os_unmap_memory(
91 (void *) op->u.munmap.addr,
92 op->u.munmap.len);
93
Jeff Dike1466abf2007-10-16 01:27:12 -070094 break;
95 case MPROTECT:
Anton Ivanova9c52c22018-12-05 12:37:39 +000096 if (hvc->userspace)
97 ret = protect(&hvc->mm->context.id,
98 op->u.mprotect.addr,
99 op->u.mprotect.len,
100 op->u.mprotect.prot,
101 finished, &hvc->data);
102 else
103 ret = os_protect_memory(
104 (void *) op->u.mprotect.addr,
105 op->u.mprotect.len,
106 1, 1, 1);
Jeff Dike1466abf2007-10-16 01:27:12 -0700107 break;
108 default:
109 printk(KERN_ERR "Unknown op type %d in do_ops\n",
110 op->type);
Richard Weinberger62179d42012-04-13 15:54:01 +0200111 BUG();
Jeff Dike1466abf2007-10-16 01:27:12 -0700112 break;
113 }
114 }
115
Richard Weinberger70c82052015-10-25 22:26:09 +0100116 if (ret == -ENOMEM)
117 report_enomem();
118
Jeff Dike1466abf2007-10-16 01:27:12 -0700119 return ret;
120}
121
Jeff Dikec5600492005-09-03 15:57:36 -0700122static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
Jeff Dike1466abf2007-10-16 01:27:12 -0700123 unsigned int prot, struct host_vm_change *hvc)
Jeff Dikec5600492005-09-03 15:57:36 -0700124{
Jeff Dike5e1f65a2006-09-25 23:33:01 -0700125 __u64 offset;
Jeff Dikec5600492005-09-03 15:57:36 -0700126 struct host_vm_op *last;
Anton Ivanova9c52c22018-12-05 12:37:39 +0000127 int fd = -1, ret = 0;
Jeff Dikec5600492005-09-03 15:57:36 -0700128
Anton Ivanova9c52c22018-12-05 12:37:39 +0000129 if (hvc->userspace)
130 fd = phys_mapping(phys, &offset);
131 else
132 offset = phys;
Jeff Dike1466abf2007-10-16 01:27:12 -0700133 if (hvc->index != 0) {
134 last = &hvc->ops[hvc->index - 1];
Jeff Dikeba180fd2007-10-16 01:27:00 -0700135 if ((last->type == MMAP) &&
Jeff Dikec5600492005-09-03 15:57:36 -0700136 (last->u.mmap.addr + last->u.mmap.len == virt) &&
Jeff Dike16dd07b2007-05-06 14:51:48 -0700137 (last->u.mmap.prot == prot) && (last->u.mmap.fd == fd) &&
Jeff Dikeba180fd2007-10-16 01:27:00 -0700138 (last->u.mmap.offset + last->u.mmap.len == offset)) {
Jeff Dikec5600492005-09-03 15:57:36 -0700139 last->u.mmap.len += len;
Bodo Stroesser07bf7312005-09-03 15:57:50 -0700140 return 0;
Jeff Dikec5600492005-09-03 15:57:36 -0700141 }
142 }
143
Jeff Dike1466abf2007-10-16 01:27:12 -0700144 if (hvc->index == ARRAY_SIZE(hvc->ops)) {
145 ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
146 hvc->index = 0;
Jeff Dikec5600492005-09-03 15:57:36 -0700147 }
148
Jeff Dike1466abf2007-10-16 01:27:12 -0700149 hvc->ops[hvc->index++] = ((struct host_vm_op)
150 { .type = MMAP,
151 .u = { .mmap = { .addr = virt,
152 .len = len,
153 .prot = prot,
154 .fd = fd,
155 .offset = offset }
Bodo Stroesser07bf7312005-09-03 15:57:50 -0700156 } });
157 return ret;
Jeff Dikec5600492005-09-03 15:57:36 -0700158}
159
160static int add_munmap(unsigned long addr, unsigned long len,
Jeff Dike1466abf2007-10-16 01:27:12 -0700161 struct host_vm_change *hvc)
Jeff Dikec5600492005-09-03 15:57:36 -0700162{
163 struct host_vm_op *last;
Bodo Stroesser07bf7312005-09-03 15:57:50 -0700164 int ret = 0;
Jeff Dikec5600492005-09-03 15:57:36 -0700165
Richard Weinberger284e6d32014-07-20 13:09:15 +0200166 if ((addr >= STUB_START) && (addr < STUB_END))
167 return -EINVAL;
168
Jeff Dike1466abf2007-10-16 01:27:12 -0700169 if (hvc->index != 0) {
170 last = &hvc->ops[hvc->index - 1];
Jeff Dikeba180fd2007-10-16 01:27:00 -0700171 if ((last->type == MUNMAP) &&
172 (last->u.munmap.addr + last->u.mmap.len == addr)) {
Jeff Dikec5600492005-09-03 15:57:36 -0700173 last->u.munmap.len += len;
Bodo Stroesser07bf7312005-09-03 15:57:50 -0700174 return 0;
Jeff Dikec5600492005-09-03 15:57:36 -0700175 }
176 }
177
Jeff Dike1466abf2007-10-16 01:27:12 -0700178 if (hvc->index == ARRAY_SIZE(hvc->ops)) {
179 ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
180 hvc->index = 0;
Jeff Dikec5600492005-09-03 15:57:36 -0700181 }
182
Jeff Dike1466abf2007-10-16 01:27:12 -0700183 hvc->ops[hvc->index++] = ((struct host_vm_op)
184 { .type = MUNMAP,
185 .u = { .munmap = { .addr = addr,
186 .len = len } } });
Bodo Stroesser07bf7312005-09-03 15:57:50 -0700187 return ret;
Jeff Dikec5600492005-09-03 15:57:36 -0700188}
189
Jeff Dike16dd07b2007-05-06 14:51:48 -0700190static int add_mprotect(unsigned long addr, unsigned long len,
Jeff Dike1466abf2007-10-16 01:27:12 -0700191 unsigned int prot, struct host_vm_change *hvc)
Jeff Dikec5600492005-09-03 15:57:36 -0700192{
193 struct host_vm_op *last;
Bodo Stroesser07bf7312005-09-03 15:57:50 -0700194 int ret = 0;
Jeff Dikec5600492005-09-03 15:57:36 -0700195
Jeff Dike1466abf2007-10-16 01:27:12 -0700196 if (hvc->index != 0) {
197 last = &hvc->ops[hvc->index - 1];
Jeff Dikeba180fd2007-10-16 01:27:00 -0700198 if ((last->type == MPROTECT) &&
Jeff Dikec5600492005-09-03 15:57:36 -0700199 (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
Jeff Dikeba180fd2007-10-16 01:27:00 -0700200 (last->u.mprotect.prot == prot)) {
Jeff Dikec5600492005-09-03 15:57:36 -0700201 last->u.mprotect.len += len;
Bodo Stroesser07bf7312005-09-03 15:57:50 -0700202 return 0;
Jeff Dikec5600492005-09-03 15:57:36 -0700203 }
204 }
205
Jeff Dike1466abf2007-10-16 01:27:12 -0700206 if (hvc->index == ARRAY_SIZE(hvc->ops)) {
207 ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
208 hvc->index = 0;
Jeff Dikec5600492005-09-03 15:57:36 -0700209 }
210
Jeff Dike1466abf2007-10-16 01:27:12 -0700211 hvc->ops[hvc->index++] = ((struct host_vm_op)
212 { .type = MPROTECT,
213 .u = { .mprotect = { .addr = addr,
214 .len = len,
215 .prot = prot } } });
Bodo Stroesser07bf7312005-09-03 15:57:50 -0700216 return ret;
Jeff Dikec5600492005-09-03 15:57:36 -0700217}
218
/* Round n up to the next multiple of inc (inc must be a power of two). */
#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))
220
/*
 * Walk the ptes covering [addr, end) under pmd and queue host ops for
 * any that are new or (with hvc->force) all of them: mmap for present
 * pages, munmap for vanished ones, mprotect for protection-only
 * changes.  Each visited pte is marked up to date.  Stops at the first
 * error and returns it (0 on success).
 */
static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
				   unsigned long end,
				   struct host_vm_change *hvc)
{
	pte_t *pte;
	int r, w, x, prot, ret = 0;

	pte = pte_offset_kernel(pmd, addr);
	do {
		/*
		 * Never touch the stub pages; 'continue' still runs the
		 * pte++/addr increments in the while condition below.
		 */
		if ((addr >= STUB_START) && (addr < STUB_END))
			continue;

		r = pte_read(*pte);
		w = pte_write(*pte);
		x = pte_exec(*pte);
		/* Not-young pages map as inaccessible so that access
		 * faults mark them young; clean pages map read-only so
		 * writes can set the dirty bit. */
		if (!pte_young(*pte)) {
			r = 0;
			w = 0;
		} else if (!pte_dirty(*pte))
			w = 0;

		prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
			(x ? UM_PROT_EXEC : 0));
		if (hvc->force || pte_newpage(*pte)) {
			if (pte_present(*pte))
				ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
					       PAGE_SIZE, prot, hvc);
			else
				ret = add_munmap(addr, PAGE_SIZE, hvc);
		} else if (pte_newprot(*pte))
			ret = add_mprotect(addr, PAGE_SIZE, prot, hvc);
		*pte = pte_mkuptodate(*pte);
	} while (pte++, addr += PAGE_SIZE, ((addr < end) && !ret));
	return ret;
}
256
257static inline int update_pmd_range(pud_t *pud, unsigned long addr,
Jeff Dike1466abf2007-10-16 01:27:12 -0700258 unsigned long end,
259 struct host_vm_change *hvc)
Jeff Dike7f0536f2007-05-06 14:51:30 -0700260{
261 pmd_t *pmd;
262 unsigned long next;
263 int ret = 0;
264
265 pmd = pmd_offset(pud, addr);
266 do {
267 next = pmd_addr_end(addr, end);
Jeff Dikeba180fd2007-10-16 01:27:00 -0700268 if (!pmd_present(*pmd)) {
Jeff Dike1466abf2007-10-16 01:27:12 -0700269 if (hvc->force || pmd_newpage(*pmd)) {
270 ret = add_munmap(addr, next - addr, hvc);
Jeff Dike7f0536f2007-05-06 14:51:30 -0700271 pmd_mkuptodate(*pmd);
272 }
273 }
Jeff Dike1466abf2007-10-16 01:27:12 -0700274 else ret = update_pte_range(pmd, addr, next, hvc);
Jeff Dike909e90d2008-02-04 22:31:06 -0800275 } while (pmd++, addr = next, ((addr < end) && !ret));
Jeff Dike7f0536f2007-05-06 14:51:30 -0700276 return ret;
277}
278
279static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
Jeff Dike1466abf2007-10-16 01:27:12 -0700280 unsigned long end,
281 struct host_vm_change *hvc)
Jeff Dike7f0536f2007-05-06 14:51:30 -0700282{
283 pud_t *pud;
284 unsigned long next;
285 int ret = 0;
286
287 pud = pud_offset(pgd, addr);
288 do {
289 next = pud_addr_end(addr, end);
Jeff Dikeba180fd2007-10-16 01:27:00 -0700290 if (!pud_present(*pud)) {
Jeff Dike1466abf2007-10-16 01:27:12 -0700291 if (hvc->force || pud_newpage(*pud)) {
292 ret = add_munmap(addr, next - addr, hvc);
Jeff Dike7f0536f2007-05-06 14:51:30 -0700293 pud_mkuptodate(*pud);
294 }
295 }
Jeff Dike1466abf2007-10-16 01:27:12 -0700296 else ret = update_pmd_range(pud, addr, next, hvc);
Jeff Dike909e90d2008-02-04 22:31:06 -0800297 } while (pud++, addr = next, ((addr < end) && !ret));
Jeff Dike7f0536f2007-05-06 14:51:30 -0700298 return ret;
299}
300
/*
 * Sync the host process's mappings of [start_addr, end_addr) with mm's
 * page tables; 'force' remaps every page rather than only new ones.
 * On failure the host mappings are inconsistent, so the current
 * process is killed.  Caller holds mmap_sem for writing.
 */
void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force)
{
	pgd_t *pgd;
	struct host_vm_change hvc;
	unsigned long addr = start_addr, next;
	int ret = 0, userspace = 1;

	hvc = INIT_HVC(mm, force, userspace);
	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, end_addr);
		if (!pgd_present(*pgd)) {
			if (force || pgd_newpage(*pgd)) {
				ret = add_munmap(addr, next - addr, &hvc);
				pgd_mkuptodate(*pgd);
			}
		}
		else ret = update_pud_range(pgd, addr, next, &hvc);
	} while (pgd++, addr = next, ((addr < end_addr) && !ret));

	/* Flush whatever is still queued; 1 = final batch. */
	if (!ret)
		ret = do_ops(&hvc, hvc.index, 1);

	/* This is not an else because ret is modified above */
	if (ret) {
		printk(KERN_ERR "fix_range_common: failed, killing current "
		       "process: %d\n", task_tgid_vnr(current));
		/* We are under mmap_sem, release it such that current can terminate */
		up_write(&current->mm->mmap_sem);
		force_sig(SIGKILL, current);
		do_signal(&current->thread.regs);
	}
}
335
/*
 * Sync the host's view of the kernel address space [start, end) with
 * init_mm's page tables, applying changes directly in the UML host
 * process.  Non-present upper-level entries let the walk skip whole
 * pgd/pud/pmd-sized regions.  Returns nonzero if anything changed;
 * panics on host failure since the kernel mappings would be unusable.
 */
static int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
{
	struct mm_struct *mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, last;
	int updated = 0, err = 0, force = 0, userspace = 0;
	struct host_vm_change hvc;

	mm = &init_mm;
	hvc = INIT_HVC(mm, force, userspace);
	for (addr = start; addr < end;) {
		pgd = pgd_offset(mm, addr);
		if (!pgd_present(*pgd)) {
			/* Skip to the next pgd boundary (clamped to end). */
			last = ADD_ROUND(addr, PGDIR_SIZE);
			if (last > end)
				last = end;
			if (pgd_newpage(*pgd)) {
				updated = 1;
				err = add_munmap(addr, last - addr, &hvc);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pud = pud_offset(pgd, addr);
		if (!pud_present(*pud)) {
			last = ADD_ROUND(addr, PUD_SIZE);
			if (last > end)
				last = end;
			if (pud_newpage(*pud)) {
				updated = 1;
				err = add_munmap(addr, last - addr, &hvc);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (!pmd_present(*pmd)) {
			last = ADD_ROUND(addr, PMD_SIZE);
			if (last > end)
				last = end;
			if (pmd_newpage(*pmd)) {
				updated = 1;
				err = add_munmap(addr, last - addr, &hvc);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pte = pte_offset_kernel(pmd, addr);
		if (!pte_present(*pte) || pte_newpage(*pte)) {
			/* New or gone page: unmap, then remap if present. */
			updated = 1;
			err = add_munmap(addr, PAGE_SIZE, &hvc);
			if (err < 0)
				panic("munmap failed, errno = %d\n",
				      -err);
			if (pte_present(*pte))
				err = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
					       PAGE_SIZE, 0, &hvc);
		}
		else if (pte_newprot(*pte)) {
			updated = 1;
			err = add_mprotect(addr, PAGE_SIZE, 0, &hvc);
		}
		addr += PAGE_SIZE;
	}
	/* Flush the remaining queued ops as the final batch. */
	if (!err)
		err = do_ops(&hvc, hvc.index, 1);

	if (err < 0)
		panic("flush_tlb_kernel failed, errno = %d\n", err);
	return updated;
}
422
/*
 * Sync a single page of vma's address space with the host process:
 * remap a new page, unmap a vanished one, or adjust protections, then
 * mark the pte up to date.  If any table level is missing or the host
 * call fails, the current process is killed - its host mapping no
 * longer matches its page tables.
 */
void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	struct mm_struct *mm = vma->vm_mm;
	void *flush = NULL;
	int r, w, x, prot, err = 0;
	struct mm_id *mm_id;

	address &= PAGE_MASK;
	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto kill;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		goto kill;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		goto kill;

	pte = pte_offset_kernel(pmd, address);

	/* Same accessed/dirty emulation as update_pte_range(): not-young
	 * pages become inaccessible, clean pages become read-only. */
	r = pte_read(*pte);
	w = pte_write(*pte);
	x = pte_exec(*pte);
	if (!pte_young(*pte)) {
		r = 0;
		w = 0;
	} else if (!pte_dirty(*pte)) {
		w = 0;
	}

	mm_id = &mm->context.id;
	prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
		(x ? UM_PROT_EXEC : 0));
	if (pte_newpage(*pte)) {
		if (pte_present(*pte)) {
			unsigned long long offset;
			int fd;

			fd = phys_mapping(pte_val(*pte) & PAGE_MASK, &offset);
			err = map(mm_id, address, PAGE_SIZE, prot, fd, offset,
				  1, &flush);
		}
		else err = unmap(mm_id, address, PAGE_SIZE, 1, &flush);
	}
	else if (pte_newprot(*pte))
		err = protect(mm_id, address, PAGE_SIZE, prot, 1, &flush);

	if (err) {
		if (err == -ENOMEM)
			report_enomem();

		goto kill;
	}

	*pte = pte_mkuptodate(*pte);

	return;

kill:
	printk(KERN_ERR "Failed to flush page for address 0x%lx\n", address);
	force_sig(SIGKILL, current);
}
491
/* Look up the pgd entry for 'address' in mm (external helper wrapper). */
pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
{
	return pgd_offset(mm, address);
}
496
/* Look up the pud entry for 'address' under pgd (external helper wrapper). */
pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
{
	return pud_offset(pgd, address);
}
501
/* Look up the pmd entry for 'address' under pud (external helper wrapper). */
pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
{
	return pmd_offset(pud, address);
}
506
/* Look up the pte for 'address' under pmd (external helper wrapper). */
pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
{
	return pte_offset_kernel(pmd, address);
}
511
/*
 * Resolve 'addr' in task's address space down to its pte.
 * NOTE(review): no presence checks on the intermediate levels - this
 * appears to assume the full table chain exists for addr; confirm
 * against callers before relying on it for arbitrary addresses.
 */
pte_t *addr_pte(struct task_struct *task, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(task->mm, addr);
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pte_offset_map(pmd, addr);
}
520
Jeff Diked67b5692005-07-07 17:56:49 -0700521void flush_tlb_all(void)
522{
Anton Ivanov38e3cbd2018-12-05 12:37:40 +0000523 /*
524 * Don't bother flushing if this address space is about to be
525 * destroyed.
526 */
527 if (atomic_read(&current->mm->mm_users) == 0)
528 return;
529
Jeff Dike5e1f65a2006-09-25 23:33:01 -0700530 flush_tlb_mm(current->mm);
Jeff Diked67b5692005-07-07 17:56:49 -0700531}
532
/* Sync the kernel mappings of [start, end) with the host. */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_tlb_kernel_range_common(start, end);
}
537
/* Sync the whole kernel vm area [start_vm, end_vm) with the host. */
void flush_tlb_kernel_vm(void)
{
	flush_tlb_kernel_range_common(start_vm, end_vm);
}
542
/* Sync a single kernel page at 'addr' with the host. */
void __flush_tlb_one(unsigned long addr)
{
	flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
}
547
Jeff Dike77bf4402007-10-16 01:26:58 -0700548static void fix_range(struct mm_struct *mm, unsigned long start_addr,
549 unsigned long end_addr, int force)
550{
Anton Ivanov38e3cbd2018-12-05 12:37:40 +0000551 /*
552 * Don't bother flushing if this address space is about to be
553 * destroyed.
554 */
555 if (atomic_read(&mm->mm_users) == 0)
556 return;
557
Jeff Dike1466abf2007-10-16 01:27:12 -0700558 fix_range_common(mm, start_addr, end_addr, force);
Jeff Diked67b5692005-07-07 17:56:49 -0700559}
560
561void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
562 unsigned long end)
563{
Jeff Dikeba180fd2007-10-16 01:27:00 -0700564 if (vma->vm_mm == NULL)
565 flush_tlb_kernel_range_common(start, end);
566 else fix_range(vma->vm_mm, start, end, 0);
Jeff Diked67b5692005-07-07 17:56:49 -0700567}
Al Viro73395a02011-08-18 20:14:10 +0100568EXPORT_SYMBOL(flush_tlb_range);
Jeff Diked67b5692005-07-07 17:56:49 -0700569
/* Sync [start, end) of mm with the host (no forced remapping). */
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
			unsigned long end)
{
	fix_range(mm, start, end, 0);
}
575
576void flush_tlb_mm(struct mm_struct *mm)
577{
578 struct vm_area_struct *vma = mm->mmap;
579
580 while (vma != NULL) {
581 fix_range(mm, vma->vm_start, vma->vm_end, 0);
582 vma = vma->vm_next;
583 }
Jeff Diked67b5692005-07-07 17:56:49 -0700584}
585
586void force_flush_all(void)
587{
Jeff Dike77bf4402007-10-16 01:26:58 -0700588 struct mm_struct *mm = current->mm;
589 struct vm_area_struct *vma = mm->mmap;
590
Jeff Dikeba180fd2007-10-16 01:27:00 -0700591 while (vma != NULL) {
Jeff Dike77bf4402007-10-16 01:26:58 -0700592 fix_range(mm, vma->vm_start, vma->vm_end, 1);
593 vma = vma->vm_next;
594 }
Jeff Diked67b5692005-07-07 17:56:49 -0700595}