/*
 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */

#include "linux/mm.h"
#include "asm/page.h"
#include "asm/pgalloc.h"
#include "asm/pgtable.h"
#include "asm/tlbflush.h"
#include "choose-mode.h"
#include "mode_kern.h"
#include "as-layout.h"
#include "tlb.h"
#include "mem.h"
#include "mem_user.h"
#include "os.h"

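/*
 * add_mmap queues a host mmap operation in the ops array.  If the new
 * mapping is contiguous with the previous MMAP entry (same protections,
 * same fd, adjacent virtual addresses and file offsets), it is merged
 * into that entry instead.  When the array is already full, the pending
 * operations are flushed through do_ops before the new one is recorded.
 */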
static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
		    int r, int w, int x, struct host_vm_op *ops, int *index,
		    int last_filled, union mm_context *mmu, void **flush,
		    int (*do_ops)(union mm_context *, struct host_vm_op *,
				  int, int, void **))
{
	__u64 offset;
	struct host_vm_op *last;
	int fd, ret = 0;

	fd = phys_mapping(phys, &offset);
	if(*index != -1){
		last = &ops[*index];
		if((last->type == MMAP) &&
		   (last->u.mmap.addr + last->u.mmap.len == virt) &&
		   (last->u.mmap.r == r) && (last->u.mmap.w == w) &&
		   (last->u.mmap.x == x) && (last->u.mmap.fd == fd) &&
		   (last->u.mmap.offset + last->u.mmap.len == offset)){
			last->u.mmap.len += len;
			return 0;
		}
	}

	if(*index == last_filled){
		ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
		*index = -1;
	}

	ops[++*index] = ((struct host_vm_op) { .type	= MMAP,
					       .u = { .mmap = {
						       .addr	= virt,
						       .len	= len,
						       .r	= r,
						       .w	= w,
						       .x	= x,
						       .fd	= fd,
						       .offset	= offset }
					       } });
	return ret;
}

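/*
 * add_munmap is the unmap counterpart of add_mmap: it extends the last
 * MUNMAP entry when the new range immediately follows it, otherwise it
 * flushes a full ops array and starts a fresh entry.
 */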
static int add_munmap(unsigned long addr, unsigned long len,
		      struct host_vm_op *ops, int *index, int last_filled,
		      union mm_context *mmu, void **flush,
		      int (*do_ops)(union mm_context *, struct host_vm_op *,
				    int, int, void **))
{
	struct host_vm_op *last;
	int ret = 0;

	if(*index != -1){
		last = &ops[*index];
		if((last->type == MUNMAP) &&
		   (last->u.munmap.addr + last->u.munmap.len == addr)){
			last->u.munmap.len += len;
			return 0;
		}
	}

	if(*index == last_filled){
		ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
		*index = -1;
	}

	ops[++*index] = ((struct host_vm_op) { .type	= MUNMAP,
					       .u = { .munmap = {
						       .addr	= addr,
						       .len	= len } } });
	return ret;
}

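/*
 * add_mprotect queues a host protection change, again coalescing with
 * the previous MPROTECT entry when the address range and r/w/x bits
 * line up, and flushing through do_ops when the ops array is full.
 */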
static int add_mprotect(unsigned long addr, unsigned long len, int r, int w,
			int x, struct host_vm_op *ops, int *index,
			int last_filled, union mm_context *mmu, void **flush,
			int (*do_ops)(union mm_context *, struct host_vm_op *,
				      int, int, void **))
{
	struct host_vm_op *last;
	int ret = 0;

	if(*index != -1){
		last = &ops[*index];
		if((last->type == MPROTECT) &&
		   (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
		   (last->u.mprotect.r == r) && (last->u.mprotect.w == w) &&
		   (last->u.mprotect.x == x)){
			last->u.mprotect.len += len;
			return 0;
		}
	}

	if(*index == last_filled){
		ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
		*index = -1;
	}

	ops[++*index] = ((struct host_vm_op) { .type	= MPROTECT,
					       .u = { .mprotect = {
						       .addr	= addr,
						       .len	= len,
						       .r	= r,
						       .w	= w,
						       .x	= x } } });
	return ret;
}

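/*
 * Advance n to the next inc-aligned boundary above it (inc must be a
 * power of two).  Used below to skip to the next page-table boundary
 * when an upper-level entry is not present.
 */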
#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))

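/*
 * update_pte_range walks the PTEs covering [addr, end), derives the host
 * r/w/x permissions from the young/dirty bits, and emits an mmap, munmap
 * or mprotect operation for every PTE that is new or has changed
 * protection (or for every PTE when force is set).  Each PTE is then
 * marked up to date.
 */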
static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
				   unsigned long end, struct host_vm_op *ops,
				   int last_op, int *op_index, int force,
				   union mm_context *mmu, void **flush,
				   int (*do_ops)(union mm_context *,
						 struct host_vm_op *, int, int,
						 void **))
{
	pte_t *pte;
	int r, w, x, ret = 0;

	pte = pte_offset_kernel(pmd, addr);
	do {
		r = pte_read(*pte);
		w = pte_write(*pte);
		x = pte_exec(*pte);
		if (!pte_young(*pte)) {
			r = 0;
			w = 0;
		} else if (!pte_dirty(*pte)) {
			w = 0;
		}
		if(force || pte_newpage(*pte)){
			if(pte_present(*pte))
				ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
					       PAGE_SIZE, r, w, x, ops,
					       op_index, last_op, mmu, flush,
					       do_ops);
			else ret = add_munmap(addr, PAGE_SIZE, ops, op_index,
					      last_op, mmu, flush, do_ops);
		}
		else if(pte_newprot(*pte))
			ret = add_mprotect(addr, PAGE_SIZE, r, w, x, ops,
					   op_index, last_op, mmu, flush,
					   do_ops);
		*pte = pte_mkuptodate(*pte);
	} while (pte++, addr += PAGE_SIZE, ((addr != end) && !ret));
	return ret;
}

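/*
 * update_pmd_range and update_pud_range below are the middle levels of
 * the page-table walk: a level that is not present is unmapped as a
 * single range (when forced or marked as a new page), otherwise the
 * walk recurses into the next lower level.
 */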
static inline int update_pmd_range(pud_t *pud, unsigned long addr,
				   unsigned long end, struct host_vm_op *ops,
				   int last_op, int *op_index, int force,
				   union mm_context *mmu, void **flush,
				   int (*do_ops)(union mm_context *,
						 struct host_vm_op *, int, int,
						 void **))
{
	pmd_t *pmd;
	unsigned long next;
	int ret = 0;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if(!pmd_present(*pmd)){
			if(force || pmd_newpage(*pmd)){
				ret = add_munmap(addr, next - addr, ops,
						 op_index, last_op, mmu,
						 flush, do_ops);
				pmd_mkuptodate(*pmd);
			}
		}
		else ret = update_pte_range(pmd, addr, next, ops, last_op,
					    op_index, force, mmu, flush,
					    do_ops);
	} while (pmd++, addr = next, ((addr != end) && !ret));
	return ret;
}

static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
				   unsigned long end, struct host_vm_op *ops,
				   int last_op, int *op_index, int force,
				   union mm_context *mmu, void **flush,
				   int (*do_ops)(union mm_context *,
						 struct host_vm_op *, int, int,
						 void **))
{
	pud_t *pud;
	unsigned long next;
	int ret = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if(!pud_present(*pud)){
			if(force || pud_newpage(*pud)){
				ret = add_munmap(addr, next - addr, ops,
						 op_index, last_op, mmu,
						 flush, do_ops);
				pud_mkuptodate(*pud);
			}
		}
		else ret = update_pmd_range(pud, addr, next, ops, last_op,
					    op_index, force, mmu, flush,
					    do_ops);
	} while (pud++, addr = next, ((addr != end) && !ret));
	return ret;
}

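/*
 * fix_range_common walks the page tables covering the given address
 * range, batches the resulting host operations in a small on-stack ops
 * array, and hands them to do_ops (the skas or tt back end).  A failure
 * at any point stops the walk, and after the final flush any error
 * kills the current process, since the host and guest views of the
 * address space may no longer match.
 */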
void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force,
		      int (*do_ops)(union mm_context *, struct host_vm_op *,
				    int, int, void **))
{
	pgd_t *pgd;
	union mm_context *mmu = &mm->context;
	struct host_vm_op ops[1];
	unsigned long addr = start_addr, next;
	int ret = 0, last_op = ARRAY_SIZE(ops) - 1, op_index = -1;
	void *flush = NULL;

	ops[0].type = NONE;
	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, end_addr);
		if(!pgd_present(*pgd)){
			if (force || pgd_newpage(*pgd)){
				ret = add_munmap(addr, next - addr, ops,
						 &op_index, last_op, mmu,
						 &flush, do_ops);
				pgd_mkuptodate(*pgd);
			}
		}
		else ret = update_pud_range(pgd, addr, next, ops, last_op,
					    &op_index, force, mmu, &flush,
					    do_ops);
	} while (pgd++, addr = next, ((addr != end_addr) && !ret));

	if(!ret)
		ret = (*do_ops)(mmu, ops, op_index, 1, &flush);

	/* This is not an else because ret is modified above */
	if(ret) {
		printk("fix_range_common: failed, killing current process\n");
		force_sig(SIGKILL, current);
	}
}

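/*
 * flush_tlb_kernel_range_common does the same job for init_mm, but
 * applies the changes to the host immediately via os_unmap_memory,
 * map_memory and os_protect_memory instead of batching them.  It
 * returns whether any host mapping was actually changed.
 */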
int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
{
	struct mm_struct *mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, last;
	int updated = 0, err;

	mm = &init_mm;
	for(addr = start; addr < end;){
		pgd = pgd_offset(mm, addr);
		if(!pgd_present(*pgd)){
			last = ADD_ROUND(addr, PGDIR_SIZE);
			if(last > end)
				last = end;
			if(pgd_newpage(*pgd)){
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if(err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pud = pud_offset(pgd, addr);
		if(!pud_present(*pud)){
			last = ADD_ROUND(addr, PUD_SIZE);
			if(last > end)
				last = end;
			if(pud_newpage(*pud)){
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if(err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if(!pmd_present(*pmd)){
			last = ADD_ROUND(addr, PMD_SIZE);
			if(last > end)
				last = end;
			if(pmd_newpage(*pmd)){
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if(err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pte = pte_offset_kernel(pmd, addr);
		if(!pte_present(*pte) || pte_newpage(*pte)){
			updated = 1;
			err = os_unmap_memory((void *) addr,
					      PAGE_SIZE);
			if(err < 0)
				panic("munmap failed, errno = %d\n",
				      -err);
			if(pte_present(*pte))
				map_memory(addr,
					   pte_val(*pte) & PAGE_MASK,
					   PAGE_SIZE, 1, 1, 1);
		}
		else if(pte_newprot(*pte)){
			updated = 1;
			os_protect_memory((void *) addr, PAGE_SIZE, 1, 1, 1);
		}
		addr += PAGE_SIZE;
	}
	return(updated);
}

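/*
 * Thin wrappers around the generic page-table lookup helpers, kept as
 * real functions so other parts of UML can walk a process's page
 * tables through them.
 */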
pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
{
	return(pgd_offset(mm, address));
}

pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
{
	return(pud_offset(pgd, address));
}

pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
{
	return(pmd_offset(pud, address));
}

pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
{
	return(pte_offset_kernel(pmd, address));
}

pte_t *addr_pte(struct task_struct *task, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(task->mm, addr);
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return(pte_offset_map(pmd, addr));
}

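/*
 * The remaining entry points dispatch each flush to the tt or skas
 * implementation through CHOOSE_MODE/CHOOSE_MODE_PROC.
 */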
void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
{
	address &= PAGE_MASK;

	CHOOSE_MODE(flush_tlb_range(vma, address, address + PAGE_SIZE),
		    flush_tlb_page_skas(vma, address));
}

void flush_tlb_all(void)
{
	flush_tlb_mm(current->mm);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	CHOOSE_MODE_PROC(flush_tlb_kernel_range_tt,
			 flush_tlb_kernel_range_common, start, end);
}

void flush_tlb_kernel_vm(void)
{
	CHOOSE_MODE(flush_tlb_kernel_vm_tt(),
		    flush_tlb_kernel_range_common(start_vm, end_vm));
}

void __flush_tlb_one(unsigned long addr)
{
	CHOOSE_MODE_PROC(__flush_tlb_one_tt, __flush_tlb_one_skas, addr);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	CHOOSE_MODE_PROC(flush_tlb_range_tt, flush_tlb_range_skas, vma, start,
			 end);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	CHOOSE_MODE_PROC(flush_tlb_mm_tt, flush_tlb_mm_skas, mm);
}

void force_flush_all(void)
{
	CHOOSE_MODE(force_flush_all_tt(), force_flush_all_skas());
}