/*
 * Copyright (C) 1995  Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * X86-64 port
 *	Andi Kleen.
 *
 *	CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>
#include <linux/syscalls.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>
#include <asm/xen/hypervisor.h>
#include <asm/vdso.h>
#include <asm/resctrl_sched.h>
#include <asm/unistd.h>
#include <asm/fsgsbase.h>
#ifdef CONFIG_IA32_EMULATION
/* Not included via unistd.h */
#include <asm/unistd_32_ia32.h>
#endif

#include "process.h"

/* Also prints some state that isn't saved in pt_regs. */
void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
{
        unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
        unsigned long d0, d1, d2, d3, d6, d7;
        unsigned int fsindex, gsindex;
        unsigned int ds, es;

        show_iret_regs(regs);

        if (regs->orig_ax != -1)
                pr_cont(" ORIG_RAX: %016lx\n", regs->orig_ax);
        else
                pr_cont("\n");

        printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
               regs->ax, regs->bx, regs->cx);
        printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
               regs->dx, regs->si, regs->di);
        printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
               regs->bp, regs->r8, regs->r9);
        printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
               regs->r10, regs->r11, regs->r12);
        printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
               regs->r13, regs->r14, regs->r15);

        if (mode == SHOW_REGS_SHORT)
                return;

        if (mode == SHOW_REGS_USER) {
                rdmsrl(MSR_FS_BASE, fs);
                rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
                printk(KERN_DEFAULT "FS: %016lx GS: %016lx\n",
                       fs, shadowgs);
                return;
        }

        asm("movl %%ds,%0" : "=r" (ds));
        asm("movl %%es,%0" : "=r" (es));
        asm("movl %%fs,%0" : "=r" (fsindex));
        asm("movl %%gs,%0" : "=r" (gsindex));

        rdmsrl(MSR_FS_BASE, fs);
        rdmsrl(MSR_GS_BASE, gs);
        rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

        cr0 = read_cr0();
        cr2 = read_cr2();
        cr3 = __read_cr3();
        cr4 = __read_cr4();

        printk(KERN_DEFAULT "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
               fs, fsindex, gs, gsindex, shadowgs);
        printk(KERN_DEFAULT "CS: %04lx DS: %04x ES: %04x CR0: %016lx\n", regs->cs, ds,
               es, cr0);
        printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
               cr4);

        get_debugreg(d0, 0);
        get_debugreg(d1, 1);
        get_debugreg(d2, 2);
        get_debugreg(d3, 3);
        get_debugreg(d6, 6);
        get_debugreg(d7, 7);

        /* Only print out debug registers if they are in their non-default state. */
        if (!((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
            (d6 == DR6_RESERVED) && (d7 == 0x400))) {
                printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n",
                       d0, d1, d2);
                printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n",
                       d3, d6, d7);
        }

        if (boot_cpu_has(X86_FEATURE_OSPKE))
                printk(KERN_DEFAULT "PKRU: %08x\n", read_pkru());
}

void release_thread(struct task_struct *dead_task)
{
        if (dead_task->mm) {
#ifdef CONFIG_MODIFY_LDT_SYSCALL
                if (dead_task->mm->context.ldt) {
                        pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
                                dead_task->comm,
                                dead_task->mm->context.ldt->entries,
                                dead_task->mm->context.ldt->nr_entries);
                        BUG();
                }
#endif
        }
}

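/*
 * Tells the FS/GS save and load helpers below which of the two segments
 * they are operating on.
 */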
enum which_selector {
        FS,
        GS
};

/*
 * Saves the FS or GS base for an outgoing thread if FSGSBASE extensions are
 * not available. The goal is to be reasonably fast on non-FSGSBASE systems.
 * It's forcibly inlined because it'll generate better code and this function
 * is hot.
 */
static __always_inline void save_base_legacy(struct task_struct *prev_p,
                                             unsigned short selector,
                                             enum which_selector which)
{
        if (likely(selector == 0)) {
                /*
                 * On Intel (without X86_BUG_NULL_SEG), the segment base could
                 * be the pre-existing saved base or it could be zero. On AMD
                 * (with X86_BUG_NULL_SEG), the segment base could be almost
                 * anything.
                 *
                 * This branch is very hot (it's hit twice on almost every
                 * context switch between 64-bit programs), and avoiding
                 * the RDMSR helps a lot, so we just assume that whatever
                 * value is already saved is correct. This matches historical
                 * Linux behavior, so it won't break existing applications.
                 *
                 * To avoid leaking state, on non-X86_BUG_NULL_SEG CPUs, if we
                 * report that the base is zero, it needs to actually be zero:
                 * see the corresponding logic in load_seg_legacy.
                 */
        } else {
                /*
                 * If the selector is 1, 2, or 3, then the base is zero on
                 * !X86_BUG_NULL_SEG CPUs and could be anything on
                 * X86_BUG_NULL_SEG CPUs. In the latter case, Linux
                 * has never attempted to preserve the base across context
                 * switches.
                 *
                 * If selector > 3, then it refers to a real segment, and
                 * saving the base isn't necessary.
                 */
                if (which == FS)
                        prev_p->thread.fsbase = 0;
                else
                        prev_p->thread.gsbase = 0;
        }
}

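/*
 * Save the outgoing thread's FS and GS selectors, plus (lacking FSGSBASE)
 * whatever base state save_base_legacy() can infer without an RDMSR.
 */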
static __always_inline void save_fsgs(struct task_struct *task)
{
        savesegment(fs, task->thread.fsindex);
        savesegment(gs, task->thread.gsindex);
        save_base_legacy(task, task->thread.fsindex, FS);
        save_base_legacy(task, task->thread.gsindex, GS);
}

#if IS_ENABLED(CONFIG_KVM)
/*
 * While a process is running, current->thread.fsbase and
 * current->thread.gsbase may not match the corresponding CPU registers
 * (see save_base_legacy()). KVM wants an efficient way to save and
 * restore FSBASE and GSBASE.
 * When FSGSBASE extensions are enabled, this will have to use RD{FS,GS}BASE.
 */
void save_fsgs_for_kvm(void)
{
        save_fsgs(current);
}
EXPORT_SYMBOL_GPL(save_fsgs_for_kvm);
#endif

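/*
 * Load a selector into %fs or %gs. The GS selector goes through
 * load_gs_index() rather than a plain segment move so that the kernel's
 * SWAPGS-based GS handling stays consistent.
 */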
static __always_inline void loadseg(enum which_selector which,
                                    unsigned short sel)
{
        if (which == FS)
                loadsegment(fs, sel);
        else
                load_gs_index(sel);
}

static __always_inline void load_seg_legacy(unsigned short prev_index,
                                            unsigned long prev_base,
                                            unsigned short next_index,
                                            unsigned long next_base,
                                            enum which_selector which)
{
        if (likely(next_index <= 3)) {
                /*
                 * The next task is using 64-bit TLS, is not using this
                 * segment at all, or is having fun with arcane CPU features.
                 */
                if (next_base == 0) {
                        /*
                         * Nasty case: on AMD CPUs, we need to forcibly zero
                         * the base.
                         */
                        if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
                                loadseg(which, __USER_DS);
                                loadseg(which, next_index);
                        } else {
                                /*
                                 * We could try to exhaustively detect cases
                                 * under which we can skip the segment load,
                                 * but there's really only one case that matters
                                 * for performance: if both the previous and
                                 * next states are fully zeroed, we can skip
                                 * the load.
                                 *
                                 * (This assumes that prev_base == 0 has no
                                 * false positives. This is the case on
                                 * Intel-style CPUs.)
                                 */
                                if (likely(prev_index | next_index | prev_base))
                                        loadseg(which, next_index);
                        }
                } else {
                        if (prev_index != next_index)
                                loadseg(which, next_index);
                        wrmsrl(which == FS ? MSR_FS_BASE : MSR_KERNEL_GS_BASE,
                               next_base);
                }
        } else {
                /*
                 * The next task is using a real segment. Loading the selector
                 * is sufficient.
                 */
                loadseg(which, next_index);
        }
}

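/*
 * Restore the incoming thread's FS and GS state via the legacy
 * (selector + MSR) path; see load_seg_legacy() above for the details.
 */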
static __always_inline void x86_fsgsbase_load(struct thread_struct *prev,
                                              struct thread_struct *next)
{
        load_seg_legacy(prev->fsindex, prev->fsbase,
                        next->fsindex, next->fsbase, FS);
        load_seg_legacy(prev->gsindex, prev->gsbase,
                        next->gsindex, next->gsbase, GS);
}

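/*
 * Look up a segment base for a non-running task from its descriptor
 * tables. Bits 3-15 of the selector index the GDT or LDT (bit 2, the TI
 * bit, picks which table); as noted below, only the TLS slots in the GDT
 * can carry a nonzero base for user segments.
 */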
static unsigned long x86_fsgsbase_read_task(struct task_struct *task,
                                            unsigned short selector)
{
        unsigned short idx = selector >> 3;
        unsigned long base;

        if (likely((selector & SEGMENT_TI_MASK) == 0)) {
                if (unlikely(idx >= GDT_ENTRIES))
                        return 0;

                /*
                 * There are no user segments in the GDT with nonzero bases
                 * other than the TLS segments.
                 */
                if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
                        return 0;

                idx -= GDT_ENTRY_TLS_MIN;
                base = get_desc_base(&task->thread.tls_array[idx]);
        } else {
#ifdef CONFIG_MODIFY_LDT_SYSCALL
                struct ldt_struct *ldt;

                /*
                 * If performance here mattered, we could protect the LDT
                 * with RCU. This is a slow path, though, so we can just
                 * take the mutex.
                 */
                mutex_lock(&task->mm->context.lock);
                ldt = task->mm->context.ldt;
                if (unlikely(idx >= ldt->nr_entries))
                        base = 0;
                else
                        base = get_desc_base(ldt->entries + idx);
                mutex_unlock(&task->mm->context.lock);
#else
                base = 0;
#endif
        }

        return base;
}

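/*
 * Read a task's FS base: from the live MSR if the task is current, from
 * the saved thread state if the selector is zero, or from the descriptor
 * tables otherwise. The same logic is mirrored for GS below.
 */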
unsigned long x86_fsbase_read_task(struct task_struct *task)
{
        unsigned long fsbase;

        if (task == current)
                fsbase = x86_fsbase_read_cpu();
        else if (task->thread.fsindex == 0)
                fsbase = task->thread.fsbase;
        else
                fsbase = x86_fsgsbase_read_task(task, task->thread.fsindex);

        return fsbase;
}

unsigned long x86_gsbase_read_task(struct task_struct *task)
{
        unsigned long gsbase;

        if (task == current)
                gsbase = x86_gsbase_read_cpu_inactive();
        else if (task->thread.gsindex == 0)
                gsbase = task->thread.gsbase;
        else
                gsbase = x86_fsgsbase_read_task(task, task->thread.gsindex);

        return gsbase;
}

void x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase)
{
        WARN_ON_ONCE(task == current);

        task->thread.fsbase = fsbase;
}

void x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase)
{
        WARN_ON_ONCE(task == current);

        task->thread.gsbase = gsbase;
}

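/*
 * Set up a child's thread state at fork/clone time: kernel threads get a
 * zeroed pt_regs; user threads inherit the parent's registers, segment
 * state, I/O bitmap and, with CLONE_SETTLS, a new TLS area.
 */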
int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
                unsigned long arg, struct task_struct *p, unsigned long tls)
{
        int err;
        struct pt_regs *childregs;
        struct fork_frame *fork_frame;
        struct inactive_task_frame *frame;
        struct task_struct *me = current;

        childregs = task_pt_regs(p);
        fork_frame = container_of(childregs, struct fork_frame, regs);
        frame = &fork_frame->frame;
        frame->bp = 0;
        frame->ret_addr = (unsigned long) ret_from_fork;
        p->thread.sp = (unsigned long) fork_frame;
        p->thread.io_bitmap_ptr = NULL;

        savesegment(gs, p->thread.gsindex);
        p->thread.gsbase = p->thread.gsindex ? 0 : me->thread.gsbase;
        savesegment(fs, p->thread.fsindex);
        p->thread.fsbase = p->thread.fsindex ? 0 : me->thread.fsbase;
        savesegment(es, p->thread.es);
        savesegment(ds, p->thread.ds);
        memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

        if (unlikely(p->flags & PF_KTHREAD)) {
                /* kernel thread */
                memset(childregs, 0, sizeof(struct pt_regs));
                frame->bx = sp;         /* function */
                frame->r12 = arg;
                return 0;
        }
        frame->bx = 0;
        *childregs = *current_pt_regs();

        childregs->ax = 0;
        if (sp)
                childregs->sp = sp;

        err = -ENOMEM;
        if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
                p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
                                                  IO_BITMAP_BYTES, GFP_KERNEL);
                if (!p->thread.io_bitmap_ptr) {
                        p->thread.io_bitmap_max = 0;
                        return -ENOMEM;
                }
                set_tsk_thread_flag(p, TIF_IO_BITMAP);
        }

        /*
         * Set a new TLS for the child thread?
         */
        if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
                if (in_ia32_syscall())
                        err = do_set_thread_area(p, -1,
                                (struct user_desc __user *)tls, 0);
                else
#endif
                        err = do_arch_prctl_64(p, ARCH_SET_FS, tls);
                if (err)
                        goto out;
        }
        err = 0;
out:
        if (err && p->thread.io_bitmap_ptr) {
                kfree(p->thread.io_bitmap_ptr);
                p->thread.io_bitmap_max = 0;
        }

        return err;
}

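/*
 * Reset the register and segment state for a fresh execve(). FS and GS
 * are zeroed first (with the extra X86_BUG_NULL_SEG dance where needed)
 * so that no base state leaks from the old program.
 */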
static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
                    unsigned long new_sp,
                    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
        WARN_ON_ONCE(regs != current_pt_regs());

        if (static_cpu_has(X86_BUG_NULL_SEG)) {
                /* Loading zero below won't clear the base. */
                loadsegment(fs, __USER_DS);
                load_gs_index(__USER_DS);
        }

        loadsegment(fs, 0);
        loadsegment(es, _ds);
        loadsegment(ds, _ds);
        load_gs_index(0);

        regs->ip = new_ip;
        regs->sp = new_sp;
        regs->cs = _cs;
        regs->ss = _ss;
        regs->flags = X86_EFLAGS_IF;
        force_iret();
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
        start_thread_common(regs, new_ip, new_sp,
                            __USER_CS, __USER_DS, 0);
}
EXPORT_SYMBOL_GPL(start_thread);

#ifdef CONFIG_COMPAT
void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
        start_thread_common(regs, new_ip, new_sp,
                            test_thread_flag(TIF_X32)
                            ? __USER_CS : __USER32_CS,
                            __USER_DS, __USER_DS);
}
#endif

/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 * The function graph tracer is not supported here either.
 */
__visible __notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
        struct thread_struct *prev = &prev_p->thread;
        struct thread_struct *next = &next_p->thread;
        struct fpu *prev_fpu = &prev->fpu;
        struct fpu *next_fpu = &next->fpu;
        int cpu = smp_processor_id();

        WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
                     this_cpu_read(irq_count) != -1);

        switch_fpu_prepare(prev_fpu, cpu);

        /* We must save %fs and %gs before load_TLS() because
         * %fs and %gs may be cleared by load_TLS().
         *
         * (e.g. xen_load_tls())
         */
        save_fsgs(prev_p);

        /*
         * Load TLS before restoring any segments so that segment loads
         * reference the correct GDT entries.
         */
        load_TLS(next, cpu);

        /*
         * Leave lazy mode, flushing any hypercalls made here. This
         * must be done after loading TLS entries in the GDT but before
         * loading segments that might reference them.
         */
        arch_end_context_switch(next_p);

        /* Switch DS and ES.
         *
         * Reading them only returns the selectors, but writing them (if
         * nonzero) loads the full descriptor from the GDT or LDT. The
         * LDT for next is loaded in switch_mm, and the GDT is loaded
         * above.
         *
         * We therefore need to write new values to the segment
         * registers on every context switch unless both the new and old
         * values are zero.
         *
         * Note that we don't need to do anything for CS and SS, as
         * those are saved and restored as part of pt_regs.
         */
        savesegment(es, prev->es);
        if (unlikely(next->es | prev->es))
                loadsegment(es, next->es);

        savesegment(ds, prev->ds);
        if (unlikely(next->ds | prev->ds))
                loadsegment(ds, next->ds);

        x86_fsgsbase_load(prev, next);

        switch_fpu_finish(next_fpu, cpu);

        /*
         * Switch the per-CPU current task and top-of-stack pointers.
         */
        this_cpu_write(current_task, next_p);
        this_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p));

        /* Reload sp0. */
        update_task_stack(next_p);

        switch_to_extra(prev_p, next_p);

#ifdef CONFIG_XEN_PV
        /*
         * On Xen PV, IOPL bits in pt_regs->flags have no effect, and
         * current_pt_regs()->flags may not match the current task's
         * intended IOPL. We need to switch it manually.
         */
        if (unlikely(static_cpu_has(X86_FEATURE_XENPV) &&
                     prev->iopl != next->iopl))
                xen_set_iopl_mask(next->iopl);
#endif

        if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
                /*
                 * AMD CPUs have a misfeature: SYSRET sets the SS selector but
                 * does not update the cached descriptor. As a result, if we
                 * do SYSRET while SS is NULL, we'll end up in user mode with
                 * SS apparently equal to __USER_DS but actually unusable.
                 *
                 * The straightforward workaround would be to fix it up just
                 * before SYSRET, but that would slow down the system call
                 * fast paths. Instead, we ensure that SS is never NULL in
                 * system call context. We do this by replacing NULL SS
                 * selectors at every context switch. SYSCALL sets up a valid
                 * SS, so the only way to get NULL is to re-enter the kernel
                 * from CPL 3 through an interrupt. Since that can't happen
                 * in the same task as a running syscall, we are guaranteed to
                 * context switch between every interrupt vector entry and a
                 * subsequent SYSRET.
                 *
                 * We read SS first because SS reads are much faster than
                 * writes. Out of caution, we force SS to __KERNEL_DS even if
                 * it previously had a different non-NULL value.
                 */
                unsigned short ss_sel;
                savesegment(ss, ss_sel);
                if (ss_sel != __KERNEL_DS)
                        loadsegment(ss, __KERNEL_DS);
        }

        /* Load the Intel cache allocation PQR MSR. */
        resctrl_sched_in();

        return prev_p;
}

void set_personality_64bit(void)
{
        /* inherit personality from parent */

        /* Make sure to be in 64bit mode */
        clear_thread_flag(TIF_IA32);
        clear_thread_flag(TIF_ADDR32);
        clear_thread_flag(TIF_X32);
        /* Pretend that this comes from a 64bit execve */
        task_pt_regs(current)->orig_ax = __NR_execve;
        current_thread_info()->status &= ~TS_COMPAT;

        /* Ensure the corresponding mm is not marked. */
        if (current->mm)
                current->mm->context.ia32_compat = 0;

        /*
         * TBD: overwrites user setup. Should have two bits.
         * But 64bit processes have always behaved this way,
         * so it's not too bad. The main problem is just that
         * 32bit children are affected again.
         */
        current->personality &= ~READ_IMPLIES_EXEC;
}

static void __set_personality_x32(void)
{
#ifdef CONFIG_X86_X32
        clear_thread_flag(TIF_IA32);
        set_thread_flag(TIF_X32);
        if (current->mm)
                current->mm->context.ia32_compat = TIF_X32;
        current->personality &= ~READ_IMPLIES_EXEC;
        /*
         * in_32bit_syscall() uses the presence of the x32 syscall bit
         * flag to determine compat status. The x86 mmap() code relies on
         * the syscall bitness so set x32 syscall bit right here to make
         * in_32bit_syscall() work during exec().
         *
         * Pretend to come from an x32 execve.
         */
        task_pt_regs(current)->orig_ax = __NR_x32_execve | __X32_SYSCALL_BIT;
        current_thread_info()->status &= ~TS_COMPAT;
#endif
}

static void __set_personality_ia32(void)
{
#ifdef CONFIG_IA32_EMULATION
        set_thread_flag(TIF_IA32);
        clear_thread_flag(TIF_X32);
        if (current->mm)
                current->mm->context.ia32_compat = TIF_IA32;
        current->personality |= force_personality32;
        /* Prepare the first "return" to user space */
        task_pt_regs(current)->orig_ax = __NR_ia32_execve;
        current_thread_info()->status |= TS_COMPAT;
#endif
}

void set_personality_ia32(bool x32)
{
        /* Make sure to be in 32bit mode */
        set_thread_flag(TIF_ADDR32);

        if (x32)
                __set_personality_x32();
        else
                __set_personality_ia32();
}
EXPORT_SYMBOL_GPL(set_personality_ia32);

#ifdef CONFIG_CHECKPOINT_RESTORE
static long prctl_map_vdso(const struct vdso_image *image, unsigned long addr)
{
        int ret;

        ret = map_vdso_once(image, addr);
        if (ret)
                return ret;

        return (long)image->size;
}
#endif

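/*
 * Handle the 64-bit arch_prctl() options. An illustrative userspace
 * sketch (not kernel code) of how these are reached:
 *
 *	unsigned long base;
 *	syscall(SYS_arch_prctl, ARCH_GET_FS, &base);	// read the FS base
 *	syscall(SYS_arch_prctl, ARCH_SET_GS, newbase);	// set the GS base
 */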
long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2)
{
        int ret = 0;

        switch (option) {
        case ARCH_SET_GS: {
                if (unlikely(arg2 >= TASK_SIZE_MAX))
                        return -EPERM;

                preempt_disable();
                /*
                 * ARCH_SET_GS has always overwritten the index
                 * and the base. Zero is the most sensible value
                 * to put in the index, and is the only value that
                 * makes any sense if FSGSBASE is unavailable.
                 */
                if (task == current) {
                        loadseg(GS, 0);
                        x86_gsbase_write_cpu_inactive(arg2);

                        /*
                         * On non-FSGSBASE systems, save_base_legacy() expects
                         * that we also fill in thread.gsbase.
                         */
                        task->thread.gsbase = arg2;

                } else {
                        task->thread.gsindex = 0;
                        x86_gsbase_write_task(task, arg2);
                }
                preempt_enable();
                break;
        }
        case ARCH_SET_FS: {
                /*
                 * Not strictly needed for %fs, but do it for symmetry
                 * with %gs
                 */
                if (unlikely(arg2 >= TASK_SIZE_MAX))
                        return -EPERM;

                preempt_disable();
                /*
                 * Set the selector to 0 for the same reason
                 * as %gs above.
                 */
                if (task == current) {
                        loadseg(FS, 0);
                        x86_fsbase_write_cpu(arg2);

                        /*
                         * On non-FSGSBASE systems, save_base_legacy() expects
                         * that we also fill in thread.fsbase.
                         */
                        task->thread.fsbase = arg2;
                } else {
                        task->thread.fsindex = 0;
                        x86_fsbase_write_task(task, arg2);
                }
                preempt_enable();
                break;
        }
        case ARCH_GET_FS: {
                unsigned long base = x86_fsbase_read_task(task);

                ret = put_user(base, (unsigned long __user *)arg2);
                break;
        }
        case ARCH_GET_GS: {
                unsigned long base = x86_gsbase_read_task(task);

                ret = put_user(base, (unsigned long __user *)arg2);
                break;
        }

#ifdef CONFIG_CHECKPOINT_RESTORE
# ifdef CONFIG_X86_X32_ABI
        case ARCH_MAP_VDSO_X32:
                return prctl_map_vdso(&vdso_image_x32, arg2);
# endif
# if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
        case ARCH_MAP_VDSO_32:
                return prctl_map_vdso(&vdso_image_32, arg2);
# endif
        case ARCH_MAP_VDSO_64:
                return prctl_map_vdso(&vdso_image_64, arg2);
#endif

        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
{
        long ret;

        ret = do_arch_prctl_64(current, option, arg2);
        if (ret == -EINVAL)
                ret = do_arch_prctl_common(current, option, arg2);

        return ret;
}

#ifdef CONFIG_IA32_EMULATION
COMPAT_SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
{
        return do_arch_prctl_common(current, option, arg2);
}
#endif

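/* Return the user stack pointer from the task's saved pt_regs. */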
unsigned long KSTK_ESP(struct task_struct *task)
{
        return task_pt_regs(task)->sp;
}