// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002 Andi Kleen
 *
 * This handles calls from both 32bit and 64bit mode.
 *
 * Lock order:
 *	context.ldt_usr_sem
 *	  mmap_sem
 *	    context.lock
 */

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>

#include <asm/ldt.h>
#include <asm/tlb.h>
#include <asm/desc.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>

static void refresh_ldt_segments(void)
{
#ifdef CONFIG_X86_64
	unsigned short sel;

	/*
	 * Make sure that the cached DS and ES descriptors match the updated
	 * LDT.
	 */
	savesegment(ds, sel);
	if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT)
		loadsegment(ds, sel);

	savesegment(es, sel);
	if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT)
		loadsegment(es, sel);
#endif
}

/* context.lock is held by the task which issued the smp function call */
static void flush_ldt(void *__mm)
{
	struct mm_struct *mm = __mm;

	if (this_cpu_read(cpu_tlbstate.loaded_mm) != mm)
		return;

	load_mm_ldt(mm);

	refresh_ldt_segments();
}

/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries)
{
	struct ldt_struct *new_ldt;
	unsigned int alloc_size;

	if (num_entries > LDT_ENTRIES)
		return NULL;

	new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
	if (!new_ldt)
		return NULL;

	BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
	alloc_size = num_entries * LDT_ENTRY_SIZE;

	/*
	 * Xen is very picky: it requires a page-aligned LDT that has no
	 * trailing nonzero bytes in any page that contains LDT descriptors.
	 * Keep it simple: zero the whole allocation and never allocate less
	 * than PAGE_SIZE.
	 */
	if (alloc_size > PAGE_SIZE)
		new_ldt->entries = vzalloc(alloc_size);
	else
		new_ldt->entries = (void *)get_zeroed_page(GFP_KERNEL);

	if (!new_ldt->entries) {
		kfree(new_ldt);
		return NULL;
	}

	/* The new LDT isn't aliased for PTI yet. */
	new_ldt->slot = -1;

	new_ldt->nr_entries = num_entries;
	return new_ldt;
}

#ifdef CONFIG_PAGE_TABLE_ISOLATION

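/*
 * Check that the page-table entries covering the LDT area are in the
 * state expected for this mm: already present (in both the kernel and,
 * with PTI, the user tables) if an LDT was installed before, and empty
 * otherwise.
 */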
static void do_sanity_check(struct mm_struct *mm,
			    bool had_kernel_mapping,
			    bool had_user_mapping)
{
	if (mm->context.ldt) {
		/*
		 * We already had an LDT. The top-level entry should already
		 * have been allocated and synchronized with the usermode
		 * tables.
		 */
		WARN_ON(!had_kernel_mapping);
		if (boot_cpu_has(X86_FEATURE_PTI))
			WARN_ON(!had_user_mapping);
	} else {
		/*
		 * This is the first time we're mapping an LDT for this process.
		 * Sync the pgd to the usermode tables.
		 */
		WARN_ON(had_kernel_mapping);
		if (boot_cpu_has(X86_FEATURE_PTI))
			WARN_ON(had_user_mapping);
	}
}

#ifdef CONFIG_X86_PAE

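/*
 * Walk down from @pgd to the PMD covering @va, returning NULL if any
 * intermediate level is not populated.
 */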
static pmd_t *pgd_to_pmd_walk(pgd_t *pgd, unsigned long va)
{
	p4d_t *p4d;
	pud_t *pud;

	if (pgd->pgd == 0)
		return NULL;

	p4d = p4d_offset(pgd, va);
	if (p4d_none(*p4d))
		return NULL;

	pud = pud_offset(p4d, va);
	if (pud_none(*pud))
		return NULL;

	return pmd_offset(pud, va);
}

static void map_ldt_struct_to_user(struct mm_struct *mm)
{
	pgd_t *k_pgd = pgd_offset(mm, LDT_BASE_ADDR);
	pgd_t *u_pgd = kernel_to_user_pgdp(k_pgd);
	pmd_t *k_pmd, *u_pmd;

	k_pmd = pgd_to_pmd_walk(k_pgd, LDT_BASE_ADDR);
	u_pmd = pgd_to_pmd_walk(u_pgd, LDT_BASE_ADDR);

	if (boot_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
		set_pmd(u_pmd, *k_pmd);
}

static void sanity_check_ldt_mapping(struct mm_struct *mm)
{
	pgd_t *k_pgd = pgd_offset(mm, LDT_BASE_ADDR);
	pgd_t *u_pgd = kernel_to_user_pgdp(k_pgd);
	bool had_kernel, had_user;
	pmd_t *k_pmd, *u_pmd;

	k_pmd = pgd_to_pmd_walk(k_pgd, LDT_BASE_ADDR);
	u_pmd = pgd_to_pmd_walk(u_pgd, LDT_BASE_ADDR);
	had_kernel = (k_pmd->pmd != 0);
	had_user   = (u_pmd->pmd != 0);

	do_sanity_check(mm, had_kernel, had_user);
}

#else /* !CONFIG_X86_PAE */

static void map_ldt_struct_to_user(struct mm_struct *mm)
{
	pgd_t *pgd = pgd_offset(mm, LDT_BASE_ADDR);

	if (boot_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
		set_pgd(kernel_to_user_pgdp(pgd), *pgd);
}

static void sanity_check_ldt_mapping(struct mm_struct *mm)
{
	pgd_t *pgd = pgd_offset(mm, LDT_BASE_ADDR);
	bool had_kernel = (pgd->pgd != 0);
	bool had_user   = (kernel_to_user_pgdp(pgd)->pgd != 0);

	do_sanity_check(mm, had_kernel, had_user);
}

#endif /* CONFIG_X86_PAE */

/*
 * If PTI is enabled, this maps the LDT into the kernelmode and
 * usermode tables for the given mm.
 */
static int
map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
{
	unsigned long va;
	bool is_vmalloc;
	spinlock_t *ptl;
	int i, nr_pages;

	if (!boot_cpu_has(X86_FEATURE_PTI))
		return 0;

	/*
	 * Any given ldt_struct should have map_ldt_struct() called at most
	 * once.
	 */
	WARN_ON(ldt->slot != -1);

	/* Check if the current mappings are sane */
	sanity_check_ldt_mapping(mm);

	is_vmalloc = is_vmalloc_addr(ldt->entries);

	nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);

	for (i = 0; i < nr_pages; i++) {
		unsigned long offset = i << PAGE_SHIFT;
		const void *src = (char *)ldt->entries + offset;
		unsigned long pfn;
		pgprot_t pte_prot;
		pte_t pte, *ptep;

		va = (unsigned long)ldt_slot_va(slot) + offset;
		pfn = is_vmalloc ? vmalloc_to_pfn(src) :
			page_to_pfn(virt_to_page(src));
		/*
		 * Treat the PTI LDT range as a *userspace* range.
		 * get_locked_pte() will allocate all needed pagetables
		 * and account for them in this mm.
		 */
		ptep = get_locked_pte(mm, va, &ptl);
		if (!ptep)
			return -ENOMEM;
		/*
		 * Map it RO so the easy-to-find address is not a primary
		 * target via some kernel interface which misses a
		 * permission check.
		 */
		pte_prot = __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL);
		/* Filter out unsupported __PAGE_KERNEL* bits: */
		pgprot_val(pte_prot) &= __supported_pte_mask;
		pte = pfn_pte(pfn, pte_prot);
		set_pte_at(mm, va, ptep, pte);
		pte_unmap_unlock(ptep, ptl);
	}

	/* Propagate LDT mapping to the user page-table */
	map_ldt_struct_to_user(mm);

	ldt->slot = slot;
	return 0;
}

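/*
 * Undo the PTI alias created by map_ldt_struct(): clear the PTEs
 * covering the LDT slot and flush the TLB for that range.
 */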
static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
{
	unsigned long va;
	int i, nr_pages;

	if (!ldt)
		return;

	/* LDT map/unmap is only required for PTI */
	if (!boot_cpu_has(X86_FEATURE_PTI))
		return;

	nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);

	for (i = 0; i < nr_pages; i++) {
		unsigned long offset = i << PAGE_SHIFT;
		spinlock_t *ptl;
		pte_t *ptep;

		va = (unsigned long)ldt_slot_va(ldt->slot) + offset;
		ptep = get_locked_pte(mm, va, &ptl);
		pte_clear(mm, va, ptep);
		pte_unmap_unlock(ptep, ptl);
	}

	va = (unsigned long)ldt_slot_va(ldt->slot);
	flush_tlb_mm_range(mm, va, va + nr_pages * PAGE_SIZE, PAGE_SHIFT, false);
}

#else /* !CONFIG_PAGE_TABLE_ISOLATION */

static int
map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
{
	return 0;
}

static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
{
}
#endif /* CONFIG_PAGE_TABLE_ISOLATION */

static void free_ldt_pgtables(struct mm_struct *mm)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	struct mmu_gather tlb;
	unsigned long start = LDT_BASE_ADDR;
	unsigned long end = LDT_END_ADDR;

	if (!boot_cpu_has(X86_FEATURE_PTI))
		return;

	tlb_gather_mmu(&tlb, mm, start, end);
	free_pgd_range(&tlb, start, end, start, end);
	tlb_finish_mmu(&tlb, start, end);
#endif
}

/* After calling this, the LDT is immutable. */
static void finalize_ldt_struct(struct ldt_struct *ldt)
{
	paravirt_alloc_ldt(ldt->entries, ldt->nr_entries);
}

static void install_ldt(struct mm_struct *mm, struct ldt_struct *ldt)
{
	mutex_lock(&mm->context.lock);

	/* Synchronizes with the READ_ONCE() in load_mm_ldt(). */
	smp_store_release(&mm->context.ldt, ldt);

	/* Activate the LDT for all CPUs using current's mm. */
	on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true);

	mutex_unlock(&mm->context.lock);
}

static void free_ldt_struct(struct ldt_struct *ldt)
{
	if (likely(!ldt))
		return;

	paravirt_free_ldt(ldt->entries, ldt->nr_entries);
	if (ldt->nr_entries * LDT_ENTRY_SIZE > PAGE_SIZE)
		vfree_atomic(ldt->entries);
	else
		free_page((unsigned long)ldt->entries);
	kfree(ldt);
}

/*
 * Called on fork from arch_dup_mmap(). Just copy the current LDT state;
 * the new task is not running, so nothing can be installed.
 */
int ldt_dup_context(struct mm_struct *old_mm, struct mm_struct *mm)
{
	struct ldt_struct *new_ldt;
	int retval = 0;

	if (!old_mm)
		return 0;

	mutex_lock(&old_mm->context.lock);
	if (!old_mm->context.ldt)
		goto out_unlock;

	new_ldt = alloc_ldt_struct(old_mm->context.ldt->nr_entries);
	if (!new_ldt) {
		retval = -ENOMEM;
		goto out_unlock;
	}

	memcpy(new_ldt->entries, old_mm->context.ldt->entries,
	       new_ldt->nr_entries * LDT_ENTRY_SIZE);
	finalize_ldt_struct(new_ldt);

	retval = map_ldt_struct(mm, new_ldt, 0);
	if (retval) {
		free_ldt_pgtables(mm);
		free_ldt_struct(new_ldt);
		goto out_unlock;
	}
	mm->context.ldt = new_ldt;

out_unlock:
	mutex_unlock(&old_mm->context.lock);
	return retval;
}

/*
 * No need to lock the MM as we are the last user.
 *
 * 64bit: Don't touch the LDT register - we're already in the next thread.
 */
void destroy_context_ldt(struct mm_struct *mm)
{
	free_ldt_struct(mm->context.ldt);
	mm->context.ldt = NULL;
}

void ldt_arch_exit_mmap(struct mm_struct *mm)
{
	free_ldt_pgtables(mm);
}

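/* Copy the current LDT to userspace, zero-filling up to bytecount. */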
static int read_ldt(void __user *ptr, unsigned long bytecount)
{
	struct mm_struct *mm = current->mm;
	unsigned long entries_size;
	int retval;

	down_read(&mm->context.ldt_usr_sem);

	if (!mm->context.ldt) {
		retval = 0;
		goto out_unlock;
	}

	if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
		bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;

	entries_size = mm->context.ldt->nr_entries * LDT_ENTRY_SIZE;
	if (entries_size > bytecount)
		entries_size = bytecount;

	if (copy_to_user(ptr, mm->context.ldt->entries, entries_size)) {
		retval = -EFAULT;
		goto out_unlock;
	}

	if (entries_size != bytecount) {
		/* Zero-fill the rest and pretend we read bytecount bytes. */
		if (clear_user(ptr + entries_size, bytecount - entries_size)) {
			retval = -EFAULT;
			goto out_unlock;
		}
	}
	retval = bytecount;

out_unlock:
	up_read(&mm->context.ldt_usr_sem);
	return retval;
}

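/* The kernel maintains no real default LDT; just zero-fill the buffer. */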
static int read_default_ldt(void __user *ptr, unsigned long bytecount)
{
	/* CHECKME: Can we use _one_ random number ? */
#ifdef CONFIG_X86_32
	unsigned long size = 5 * sizeof(struct desc_struct);
#else
	unsigned long size = 128;
#endif
	if (bytecount > size)
		bytecount = size;
	if (clear_user(ptr, bytecount))
		return -EFAULT;
	return bytecount;
}

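/*
 * Install a single LDT entry for the current mm, copying any existing
 * entries into a freshly allocated ldt_struct first. @oldmode marks the
 * legacy func==1 call, which additionally clears the AVL bit.
 */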
static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
{
	struct mm_struct *mm = current->mm;
	struct ldt_struct *new_ldt, *old_ldt;
	unsigned int old_nr_entries, new_nr_entries;
	struct user_desc ldt_info;
	struct desc_struct ldt;
	int error;

	error = -EINVAL;
	if (bytecount != sizeof(ldt_info))
		goto out;
	error = -EFAULT;
	if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
		goto out;

	error = -EINVAL;
	if (ldt_info.entry_number >= LDT_ENTRIES)
		goto out;
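	/*
	 * contents == 3 is not a defined segment type (data/stack/code
	 * are 0/1/2); accept it only in new mode and only for an entry
	 * marked not present.
	 */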
	if (ldt_info.contents == 3) {
		if (oldmode)
			goto out;
		if (ldt_info.seg_not_present == 0)
			goto out;
	}

	if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) ||
	    LDT_empty(&ldt_info)) {
		/* The user wants to clear the entry. */
		memset(&ldt, 0, sizeof(ldt));
	} else {
		if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
			error = -EINVAL;
			goto out;
		}

		fill_ldt(&ldt, &ldt_info);
		if (oldmode)
			ldt.avl = 0;
	}

	if (down_write_killable(&mm->context.ldt_usr_sem))
		return -EINTR;

	old_ldt        = mm->context.ldt;
	old_nr_entries = old_ldt ? old_ldt->nr_entries : 0;
	new_nr_entries = max(ldt_info.entry_number + 1, old_nr_entries);

	error = -ENOMEM;
	new_ldt = alloc_ldt_struct(new_nr_entries);
	if (!new_ldt)
		goto out_unlock;

	if (old_ldt)
		memcpy(new_ldt->entries, old_ldt->entries, old_nr_entries * LDT_ENTRY_SIZE);

	new_ldt->entries[ldt_info.entry_number] = ldt;
	finalize_ldt_struct(new_ldt);

	/*
	 * If we are using PTI, map the new LDT into the userspace pagetables.
	 * If there is already an LDT, use the other slot so that other CPUs
	 * will continue to use the old LDT until install_ldt() switches
	 * them over to the new LDT.
	 */
	error = map_ldt_struct(mm, new_ldt, old_ldt ? !old_ldt->slot : 0);
	if (error) {
		/*
		 * This can only fail for the first LDT setup. If an LDT is
		 * already installed then the PTE page is already
		 * populated. Mop up a half-populated page table.
		 */
		if (!WARN_ON_ONCE(old_ldt))
			free_ldt_pgtables(mm);
		free_ldt_struct(new_ldt);
		goto out_unlock;
	}

	install_ldt(mm, new_ldt);
	unmap_ldt_struct(mm, old_ldt);
	free_ldt_struct(old_ldt);
	error = 0;

out_unlock:
	up_write(&mm->context.ldt_usr_sem);
out:
	return error;
}

SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr ,
		unsigned long , bytecount)
{
	int ret = -ENOSYS;

	switch (func) {
	case 0:
		ret = read_ldt(ptr, bytecount);
		break;
	case 1:
		ret = write_ldt(ptr, bytecount, 1);
		break;
	case 2:
		ret = read_default_ldt(ptr, bytecount);
		break;
	case 0x11:
		ret = write_ldt(ptr, bytecount, 0);
		break;
	}
	/*
	 * The SYSCALL_DEFINE() macros give us an 'unsigned long'
	 * return type, but the ABI for sys_modify_ldt() expects
	 * 'int'. This cast gives us an int-sized value in %rax
	 * for the return code. The 'unsigned' is necessary so
	 * the compiler does not try to sign-extend the negative
	 * return codes into the high half of the register when
	 * taking the value from int->long.
	 */
	return (unsigned int)ret;
}
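
/*
 * Userspace usage (a minimal sketch, not part of this file): glibc has no
 * wrapper for modify_ldt(), so callers go through syscall(2). The func
 * values match the switch above: 0 reads the current LDT, 2 reads the
 * default LDT, and 1/0x11 write an entry (1 being the legacy mode). The
 * helper name below is hypothetical; the resulting selector for entry N
 * is (N << 3) | 7 (TI=1 for the LDT, RPL=3).
 *
 *	#include <asm/ldt.h>		// struct user_desc
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int install_data_segment(unsigned int base_addr)
 *	{
 *		struct user_desc d = {
 *			.entry_number	= 0,
 *			.base_addr	= base_addr,
 *			.limit		= 0xfffff,	// 4GiB with page granularity
 *			.seg_32bit	= 1,
 *			.limit_in_pages	= 1,
 *			.useable	= 1,
 *			// contents = 0 (data), present, read/write
 *		};
 *
 *		return syscall(SYS_modify_ldt, 0x11, &d, sizeof(d));
 *	}
 */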