// SPDX-License-Identifier: GPL-2.0-only
/* Page Fault Handling for ARC (TLB Miss / ProtV)
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/signal.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/mm_types.h>
#include <asm/pgalloc.h>
#include <asm/mmu.h>

/*
 * Faulting-in kernel virtual addresses is required to implement
 * vmalloc/pkmap/fixmap; refer to asm/processor.h for the System Memory Map.
 *
 * This simply copies the PMD entry (pointer to 2nd level page table or
 * hugepage) from the swapper pgdir to the task pgdir, so the 2nd level
 * table/page is shared between the two.
 */
static noinline int handle_kernel_vaddr_fault(unsigned long address)
{
	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 */
	pgd_t *pgd, *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd = pgd_offset_fast(current->active_mm, address);
	pgd_k = pgd_offset_k(address);

	if (!pgd_present(*pgd_k))
		goto bad_area;

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		goto bad_area;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		goto bad_area;

	set_pmd(pmd, *pmd_k);

	/* XXX: create the TLB entry here */
	return 0;

bad_area:
	return 1;
}
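
/*
 * Note on the recovery contract above (my reading of the code, not a claim
 * from the original source): returning 0 resumes the faulting instruction,
 * and with the PMD now present in the task's pgdir the TLB miss handler can
 * resolve the access on the re-run. Creating the TLB entry right here (the
 * XXX above) would thus only be an optimization, not needed for correctness.
 */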

void do_page_fault(unsigned long address, struct pt_regs *regs)
{
	struct vm_area_struct *vma = NULL;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int si_code = SEGV_MAPERR;
	int ret;
	vm_fault_t fault;
	int write = regs->ecr_cause & ECR_C_PROTV_STORE;	/* ST/EX */
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
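	/*
	 * FAULT_FLAG_ALLOW_RETRY permits handle_mm_fault() to drop mmap_sem
	 * while waiting (e.g. on page I/O) and report VM_FAULT_RETRY instead
	 * of blocking; FAULT_FLAG_KILLABLE lets a fatal signal interrupt any
	 * such wait.
	 */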

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (address >= VMALLOC_START && !user_mode(regs)) {
		ret = handle_kernel_vaddr_fault(address);
		if (unlikely(ret))
			goto no_context;
		else
			return;
	}
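
	/*
	 * A user mode access to a kernel/vmalloc range address deliberately
	 * falls through to the regular path below: find_vma() will not find
	 * a matching vma, so the access ends in a SIGSEGV via bad_area.
	 */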

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault.
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
retry:
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
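
	/*
	 * find_vma() returns the first vma whose vm_end lies above @address,
	 * so an address below vm_start can only be valid if it belongs to a
	 * stack vma growing down towards it; hence the VM_GROWSDOWN and
	 * expand_stack() checks above.
	 */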

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */
good_area:
	si_code = SEGV_ACCERR;

	/* Handle protection violation, execute on heap or stack */

	if ((regs->ecr_vec == ECR_V_PROTV) &&
	    (regs->ecr_cause == ECR_C_PROTV_INST_FETCH))
		goto bad_area;

	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags);

	if (fatal_signal_pending(current)) {

		/*
		 * If a retry was requested, the core mm has already
		 * relinquished mmap_sem, so it is OK to return to user mode
		 * (the pending signal is handled first).
		 */
		if (fault & VM_FAULT_RETRY) {
			if (!user_mode(regs))
				goto no_context;
			return;
		}
	}

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	if (likely(!(fault & VM_FAULT_ERROR))) {
		if (flags & FAULT_FLAG_ALLOW_RETRY) {
			/* To avoid updating stats twice for retry case */
			if (fault & VM_FAULT_MAJOR) {
				tsk->maj_flt++;
				perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
					      regs, address);
			} else {
				tsk->min_flt++;
				perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
					      regs, address);
			}

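			/*
			 * Retry is attempted at most once: clearing
			 * FAULT_FLAG_ALLOW_RETRY and setting FAULT_FLAG_TRIED
			 * below means the core mm will not return
			 * VM_FAULT_RETRY a second time, so this path cannot
			 * loop forever.
			 */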
			if (fault & VM_FAULT_RETRY) {
				flags &= ~FAULT_FLAG_ALLOW_RETRY;
				flags |= FAULT_FLAG_TRIED;
				goto retry;
			}
		}

		/* Fault Handled Gracefully */
		up_read(&mm->mmap_sem);
		return;
	}

	if (fault & VM_FAULT_OOM)
		goto out_of_memory;
	else if (fault & VM_FAULT_SIGSEGV)
		goto bad_area;
	else if (fault & VM_FAULT_SIGBUS)
		goto do_sigbus;

	/* no man's land */
	BUG();

	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
bad_area:
	up_read(&mm->mmap_sem);

	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		tsk->thread.fault_address = address;
		force_sig_fault(SIGSEGV, si_code, (void __user *)address, tsk);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault?
	 *
	 * (The kernel has valid exception points in the source
	 * where it accesses user memory. When it faults at one of
	 * those points, the exception table maps the faulting
	 * instruction to fixup code that loads an appropriate
	 * error code.)
	 */
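	/*
	 * Typical case: a copy_{to,from}_user() on a bad user pointer lands
	 * here; fixup_exception() then redirects execution to the fixup stub
	 * so the uaccess helper can return -EFAULT instead of crashing the
	 * kernel.
	 */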
	if (fixup_exception(regs))
		return;

	die("Oops", regs, address);

out_of_memory:
	up_read(&mm->mmap_sem);

	if (user_mode(regs)) {
		pagefault_out_of_memory();
		return;
	}

	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	if (!user_mode(regs))
		goto no_context;

	tsk->thread.fault_address = address;
	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address, tsk);
}