/*
 * x86 single-step support code, common to 32-bit and 64-bit.
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/ptrace.h>
#include <asm/desc.h>
#include <asm/mmu_context.h>

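/*
 * Convert a user-mode CS:IP pair into a linear address so that the
 * instruction bytes at that address can be read with access_process_vm().
 * Handles virtual-8086 mode and LDT-based code segments; GDT code
 * segments are assumed to be zero-based (see the comment below).
 */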
unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs)
{
	unsigned long addr, seg;

	addr = regs->ip;
	seg = regs->cs & 0xffff;
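	/*
	 * In virtual-8086 mode the CPU uses real-mode addressing:
	 * linear = (segment << 4) + 16-bit offset.
	 */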
	if (v8086_mode(regs)) {
		addr = (addr & 0xffff) + (seg << 4);
		return addr;
	}

	/*
	 * We'll assume that the code segments in the GDT
	 * are all zero-based. That is largely true: the
	 * TLS segments are used for data, and the PNPBIOS
	 * and APM bios ones we just ignore here.
	 */
	if ((seg & SEGMENT_TI_MASK) == SEGMENT_LDT) {
		struct desc_struct *desc;
		unsigned long base;

		seg >>= 3;

		mutex_lock(&child->mm->context.lock);
		if (unlikely(!child->mm->context.ldt ||
			     seg >= child->mm->context.ldt->size))
			addr = -1L; /* bogus selector, access would fault */
		else {
			desc = &child->mm->context.ldt->entries[seg];
			base = get_desc_base(desc);

			/* 16-bit code segment? */
			if (!desc->d)
				addr &= 0xffff;
			addr += base;
		}
		mutex_unlock(&child->mm->context.lock);
	}

	return addr;
}

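/*
 * Decide whether the instruction the task is about to execute can set
 * EFLAGS.TF itself: popf and iret pop a saved flags image, and user mode
 * may already have TF set in that image.  A rough, illustrative user-mode
 * sequence (not from this file) would be:
 *
 *	pushf
 *	orl	$0x100, (%esp)	# set TF in the saved flags image
 *	popf			# TF is now set by the program itself
 *
 * Prefix bytes are skipped so a prefixed popf/iret is still caught.
 */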
static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
{
	int i, copied;
	unsigned char opcode[15];
	unsigned long addr = convert_ip_to_linear(child, regs);

	copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
	for (i = 0; i < copied; i++) {
		switch (opcode[i]) {
		/* popf and iret */
		case 0x9d: case 0xcf:
			return 1;

			/* CHECKME: 64 65 */

		/* operand and address size prefixes */
		case 0x66: case 0x67:
			continue;
		/* irrelevant prefixes (segment overrides and repeats) */
		case 0x26: case 0x2e:
		case 0x36: case 0x3e:
		case 0x64: case 0x65:
		case 0xf0: case 0xf2: case 0xf3:
			continue;

#ifdef CONFIG_X86_64
		case 0x40 ... 0x4f:
			if (!user_64bit_mode(regs))
				/* 32-bit mode: register increment */
				return 0;
			/* 64-bit mode: REX prefix */
			continue;
#endif

			/* CHECKME: f2, f3 */

		/*
		 * pushf: NOTE! We should probably not let
		 * the user see the TF bit being set. But
		 * it's more pain than it's worth to avoid
		 * it, and a debugger could emulate this
		 * all in user space if it _really_ cares.
		 */
		case 0x9c:
		default:
			return 0;
		}
	}
	return 0;
}

/*
 * Enable single-stepping.  Return nonzero if user mode is not using TF
 * itself (i.e. it is safe for the caller to also enable block stepping).
 * TIF_FORCED_TF records that it was us, not the traced program, that set
 * TF, so user_disable_single_step() knows whether it may clear it again.
 */
static int enable_single_step(struct task_struct *child)
{
	struct pt_regs *regs = task_pt_regs(child);
	unsigned long oflags;

	/*
	 * If we stepped into a sysenter/syscall insn, it trapped in
	 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
	 * If user-mode had set TF itself, then it's still clear from
	 * do_debug() and we need to set it again to restore the user
	 * state so we don't wrongly set TIF_FORCED_TF below.
	 * If enable_single_step() was used last and that is what
	 * set TIF_SINGLESTEP, then both TF and TIF_FORCED_TF are
	 * already set and our bookkeeping is fine.
	 */
	if (unlikely(test_tsk_thread_flag(child, TIF_SINGLESTEP)))
		regs->flags |= X86_EFLAGS_TF;

	/*
	 * Always set TIF_SINGLESTEP - this guarantees that
	 * we single-step system calls etc..  This will also
	 * cause us to set TF when returning to user mode.
	 */
	set_tsk_thread_flag(child, TIF_SINGLESTEP);

	oflags = regs->flags;

	/* Set TF on the kernel stack.. */
	regs->flags |= X86_EFLAGS_TF;

	/*
	 * ..but if TF is changed by the instruction we will trace,
	 * don't mark it as being "us" that set it, so that we
	 * won't clear it by hand later.
	 *
	 * Note that if we don't actually execute the popf because
	 * of a signal arriving right now or suchlike, we will lose
	 * track of the fact that it really was "us" that set it.
	 */
	if (is_setting_trap_flag(child, regs)) {
		clear_tsk_thread_flag(child, TIF_FORCED_TF);
		return 0;
	}

	/*
	 * If TF was already set, check whether it was us who set it.
	 * If not, we should never attempt a block step.
	 */
	if (oflags & X86_EFLAGS_TF)
		return test_tsk_thread_flag(child, TIF_FORCED_TF);

	set_tsk_thread_flag(child, TIF_FORCED_TF);

	return 1;
}

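/*
 * Toggle block stepping.  TIF_BLOCKSTEP mirrors the BTF ("single-step on
 * branches") bit in the IA32_DEBUGCTL MSR: with both TF and BTF set, the
 * CPU traps only on branches, interrupts and exceptions instead of after
 * every instruction.
 */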
void set_task_blockstep(struct task_struct *task, bool on)
{
	unsigned long debugctl;

	/*
	 * Ensure irq/preemption can't change debugctl in between.
	 * Note also that both TIF_BLOCKSTEP and debugctl should
	 * be changed atomically wrt preemption.
	 *
	 * NOTE: this means that set/clear TIF_BLOCKSTEP is only safe if
	 * task is current or it can't be running, otherwise we can race
	 * with __switch_to_xtra().  We rely on ptrace_freeze_traced() but
	 * PTRACE_KILL is not safe.
	 */
	local_irq_disable();
	debugctl = get_debugctlmsr();
	if (on) {
		debugctl |= DEBUGCTLMSR_BTF;
		set_tsk_thread_flag(task, TIF_BLOCKSTEP);
	} else {
		debugctl &= ~DEBUGCTLMSR_BTF;
		clear_tsk_thread_flag(task, TIF_BLOCKSTEP);
	}
	if (task == current)
		update_debugctlmsr(debugctl);
	local_irq_enable();
}

/*
 * Enable single or block step.
 */
static void enable_step(struct task_struct *child, bool block)
{
	/*
	 * Make sure block stepping (BTF) is not enabled unless it should be.
	 * Note that we don't try to worry about any is_setting_trap_flag()
	 * instructions after the first when using block stepping.
	 * So no one should try to use debugger block stepping in a program
	 * that uses user-mode single stepping itself.
	 */
	if (enable_single_step(child) && block)
		set_task_blockstep(child, true);
	else if (test_tsk_thread_flag(child, TIF_BLOCKSTEP))
		set_task_blockstep(child, false);
}

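/*
 * Arch hooks called by the generic ptrace code for PTRACE_SINGLESTEP
 * and PTRACE_SINGLEBLOCK style resume requests.
 */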
void user_enable_single_step(struct task_struct *child)
{
	enable_step(child, false);
}

void user_enable_block_step(struct task_struct *child)
{
	enable_step(child, true);
}

void user_disable_single_step(struct task_struct *child)
{
	/*
	 * Make sure block stepping (BTF) is disabled.
	 */
	if (test_tsk_thread_flag(child, TIF_BLOCKSTEP))
		set_task_blockstep(child, false);

	/* Always clear TIF_SINGLESTEP... */
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);

	/* But touch TF only if it was set by us.. */
	if (test_and_clear_tsk_thread_flag(child, TIF_FORCED_TF))
		task_pt_regs(child)->flags &= ~X86_EFLAGS_TF;
}
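
/*
 * Rough userspace view (illustrative sketch, not part of this file): a
 * debugger reaches the hooks above through ptrace, e.g.
 *
 *	ptrace(PTRACE_ATTACH, pid, 0, 0);
 *	waitpid(pid, &status, 0);
 *	ptrace(PTRACE_SINGLESTEP, pid, 0, 0);	// -> user_enable_single_step()
 *	waitpid(pid, &status, 0);		// tracee stops after one insn (SIGTRAP)
 */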