/*
 * Derived from "arch/i386/kernel/process.c"
 * Copyright (C) 1995 Linus Torvalds
 *
 * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 * Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>
#include <linux/ftrace.h>
#include <linux/kernel_stat.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/uaccess.h>
#include <linux/elf-randomize.h>
#include <linux/pkeys.h>

#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/runlatch.h>
#include <asm/syscalls.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/debug.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#include <asm/hw_irq.h>
#endif
#include <asm/code-patching.h>
#include <asm/exec.h>
#include <asm/livepatch.h>
#include <asm/cpu_has_feature.h>
#include <asm/asm-prototypes.h>

#include <linux/kprobes.h>
#include <linux/kdebug.h>

/* Transactional Memory debug */
#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...) printk(KERN_INFO x)
#else
#define TM_DEBUG(x...) do { } while(0)
#endif

extern unsigned long _get_SP(void);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Are we running in "Suspend disabled" mode? If so we have to block any
 * sigreturn that would get us into suspended state, and we also warn in some
 * other paths that we should never reach with suspend disabled.
 */
bool tm_suspend_disabled __ro_after_init = false;

static void check_if_tm_restore_required(struct task_struct *tsk)
{
	/*
	 * If we are saving the current thread's registers, and the
	 * thread is in a transactional state, set the TIF_RESTORE_TM
	 * bit so that we know to restore the registers before
	 * returning to userspace.
	 */
	if (tsk == current && tsk->thread.regs &&
	    MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
	    !test_thread_flag(TIF_RESTORE_TM)) {
		tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
		set_thread_flag(TIF_RESTORE_TM);
	}
}

static bool tm_active_with_fp(struct task_struct *tsk)
{
	return MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
		(tsk->thread.ckpt_regs.msr & MSR_FP);
}

static bool tm_active_with_altivec(struct task_struct *tsk)
{
	return MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
		(tsk->thread.ckpt_regs.msr & MSR_VEC);
}
#else
static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
static inline bool tm_active_with_fp(struct task_struct *tsk) { return false; }
static inline bool tm_active_with_altivec(struct task_struct *tsk) { return false; }
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

bool strict_msr_control;
EXPORT_SYMBOL(strict_msr_control);

static int __init enable_strict_msr_control(char *str)
{
	strict_msr_control = true;
	pr_info("Enabling strict facility control\n");

	return 0;
}
early_param("ppc_strict_facility_enable", enable_strict_msr_control);
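/*
 * Usage note (illustrative, an assumption from the parameter name):
 * booting with "ppc_strict_facility_enable" on the kernel command line
 * sets strict_msr_control; callers that check it are expected to drop
 * facility MSR bits eagerly rather than leaving them enabled.
 */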

unsigned long msr_check_and_set(unsigned long bits)
{
	unsigned long oldmsr = mfmsr();
	unsigned long newmsr;

	newmsr = oldmsr | bits;

#ifdef CONFIG_VSX
	if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
		newmsr |= MSR_VSX;
#endif

	if (oldmsr != newmsr)
		mtmsr_isync(newmsr);

	return newmsr;
}
EXPORT_SYMBOL_GPL(msr_check_and_set);
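/*
 * Sketch of the intended pairing (exactly what giveup_fpu() and friends
 * below do):
 *
 *	msr_check_and_set(MSR_FP);
 *	__giveup_fpu(tsk);
 *	msr_check_and_clear(MSR_FP);
 *
 * i.e. temporarily enable a facility in the kernel's MSR, operate on the
 * register state, then drop the facility bit again.
 */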

void __msr_check_and_clear(unsigned long bits)
{
	unsigned long oldmsr = mfmsr();
	unsigned long newmsr;

	newmsr = oldmsr & ~bits;

#ifdef CONFIG_VSX
	if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
		newmsr &= ~MSR_VSX;
#endif

	if (oldmsr != newmsr)
		mtmsr_isync(newmsr);
}
EXPORT_SYMBOL(__msr_check_and_clear);

#ifdef CONFIG_PPC_FPU
static void __giveup_fpu(struct task_struct *tsk)
{
	unsigned long msr;

	save_fpu(tsk);
	msr = tsk->thread.regs->msr;
	msr &= ~MSR_FP;
#ifdef CONFIG_VSX
	if (cpu_has_feature(CPU_FTR_VSX))
		msr &= ~MSR_VSX;
#endif
	tsk->thread.regs->msr = msr;
}

void giveup_fpu(struct task_struct *tsk)
{
	check_if_tm_restore_required(tsk);

	msr_check_and_set(MSR_FP);
	__giveup_fpu(tsk);
	msr_check_and_clear(MSR_FP);
}
EXPORT_SYMBOL(giveup_fpu);

/*
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		/*
		 * We need to disable preemption here because if we didn't,
		 * another process could get scheduled after the regs->msr
		 * test but before we have finished saving the FP registers
		 * to the thread_struct. That process could take over the
		 * FPU, and then when we get scheduled again we would store
		 * bogus values for the remaining FP registers.
		 */
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_FP) {
			/*
			 * This should only ever be called for current or
			 * for a stopped child process. Since we save away
			 * the FP register state on context switch,
			 * there is something wrong if a stopped child appears
			 * to still have its FP state in the CPU registers.
			 */
			BUG_ON(tsk != current);
			giveup_fpu(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_fp_to_thread);

void enable_kernel_fp(void)
{
	unsigned long cpumsr;

	WARN_ON(preemptible());

	cpumsr = msr_check_and_set(MSR_FP);

	if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
		check_if_tm_restore_required(current);
		/*
		 * If a thread has already been reclaimed then the
		 * checkpointed registers are on the CPU but have definitely
		 * been saved by the reclaim code. Don't need to and *cannot*
		 * giveup as this would save to the 'live' structure not the
		 * checkpointed structure.
		 */
		if (!MSR_TM_ACTIVE(cpumsr) &&
		     MSR_TM_ACTIVE(current->thread.regs->msr))
			return;
		__giveup_fpu(current);
	}
}
EXPORT_SYMBOL(enable_kernel_fp);

static int restore_fp(struct task_struct *tsk)
{
	if (tsk->thread.load_fp || tm_active_with_fp(tsk)) {
		load_fp_state(&current->thread.fp_state);
		current->thread.load_fp++;
		return 1;
	}
	return 0;
}
#else
static int restore_fp(struct task_struct *tsk) { return 0; }
#endif /* CONFIG_PPC_FPU */

#ifdef CONFIG_ALTIVEC
#define loadvec(thr) ((thr).load_vec)

static void __giveup_altivec(struct task_struct *tsk)
{
	unsigned long msr;

	save_altivec(tsk);
	msr = tsk->thread.regs->msr;
	msr &= ~MSR_VEC;
#ifdef CONFIG_VSX
	if (cpu_has_feature(CPU_FTR_VSX))
		msr &= ~MSR_VSX;
#endif
	tsk->thread.regs->msr = msr;
}

void giveup_altivec(struct task_struct *tsk)
{
	check_if_tm_restore_required(tsk);

	msr_check_and_set(MSR_VEC);
	__giveup_altivec(tsk);
	msr_check_and_clear(MSR_VEC);
}
EXPORT_SYMBOL(giveup_altivec);

void enable_kernel_altivec(void)
{
	unsigned long cpumsr;

	WARN_ON(preemptible());

	cpumsr = msr_check_and_set(MSR_VEC);

	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
		check_if_tm_restore_required(current);
		/*
		 * If a thread has already been reclaimed then the
		 * checkpointed registers are on the CPU but have definitely
		 * been saved by the reclaim code. Don't need to and *cannot*
		 * giveup as this would save to the 'live' structure not the
		 * checkpointed structure.
		 */
		if (!MSR_TM_ACTIVE(cpumsr) &&
		     MSR_TM_ACTIVE(current->thread.regs->msr))
			return;
		__giveup_altivec(current);
	}
}
EXPORT_SYMBOL(enable_kernel_altivec);

/*
 * Make sure the VMX/Altivec register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_altivec_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VEC) {
			BUG_ON(tsk != current);
			giveup_altivec(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_altivec_to_thread);

static int restore_altivec(struct task_struct *tsk)
{
	if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
	    (tsk->thread.load_vec || tm_active_with_altivec(tsk))) {
		load_vr_state(&tsk->thread.vr_state);
		tsk->thread.used_vr = 1;
		tsk->thread.load_vec++;

		return 1;
	}
	return 0;
}
#else
#define loadvec(thr) 0
static inline int restore_altivec(struct task_struct *tsk) { return 0; }
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
static void __giveup_vsx(struct task_struct *tsk)
{
	unsigned long msr = tsk->thread.regs->msr;

	/*
	 * We should never be setting MSR_VSX without also setting
	 * MSR_FP and MSR_VEC.
	 */
	WARN_ON((msr & MSR_VSX) && !((msr & MSR_FP) && (msr & MSR_VEC)));

	/* __giveup_fpu will clear MSR_VSX */
	if (msr & MSR_FP)
		__giveup_fpu(tsk);
	if (msr & MSR_VEC)
		__giveup_altivec(tsk);
}

static void giveup_vsx(struct task_struct *tsk)
{
	check_if_tm_restore_required(tsk);

	msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
	__giveup_vsx(tsk);
	msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
}

void enable_kernel_vsx(void)
{
	unsigned long cpumsr;

	WARN_ON(preemptible());

	cpumsr = msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);

	if (current->thread.regs &&
	    (current->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP))) {
		check_if_tm_restore_required(current);
		/*
		 * If a thread has already been reclaimed then the
		 * checkpointed registers are on the CPU but have definitely
		 * been saved by the reclaim code. Don't need to and *cannot*
		 * giveup as this would save to the 'live' structure not the
		 * checkpointed structure.
		 */
		if (!MSR_TM_ACTIVE(cpumsr) &&
		     MSR_TM_ACTIVE(current->thread.regs->msr))
			return;
		__giveup_vsx(current);
	}
}
EXPORT_SYMBOL(enable_kernel_vsx);

void flush_vsx_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP)) {
			BUG_ON(tsk != current);
			giveup_vsx(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_vsx_to_thread);

static int restore_vsx(struct task_struct *tsk)
{
	if (cpu_has_feature(CPU_FTR_VSX)) {
		tsk->thread.used_vsr = 1;
		return 1;
	}

	return 0;
}
#else
static inline int restore_vsx(struct task_struct *tsk) { return 0; }
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE
void giveup_spe(struct task_struct *tsk)
{
	check_if_tm_restore_required(tsk);

	msr_check_and_set(MSR_SPE);
	__giveup_spe(tsk);
	msr_check_and_clear(MSR_SPE);
}
EXPORT_SYMBOL(giveup_spe);

void enable_kernel_spe(void)
{
	WARN_ON(preemptible());

	msr_check_and_set(MSR_SPE);

	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) {
		check_if_tm_restore_required(current);
		__giveup_spe(current);
	}
}
EXPORT_SYMBOL(enable_kernel_spe);

void flush_spe_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_SPE) {
			BUG_ON(tsk != current);
			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
			giveup_spe(tsk);
		}
		preempt_enable();
	}
}
#endif /* CONFIG_SPE */

static unsigned long msr_all_available;

static int __init init_msr_all_available(void)
{
#ifdef CONFIG_PPC_FPU
	msr_all_available |= MSR_FP;
#endif
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		msr_all_available |= MSR_VEC;
#endif
#ifdef CONFIG_VSX
	if (cpu_has_feature(CPU_FTR_VSX))
		msr_all_available |= MSR_VSX;
#endif
#ifdef CONFIG_SPE
	if (cpu_has_feature(CPU_FTR_SPE))
		msr_all_available |= MSR_SPE;
#endif

	return 0;
}
early_initcall(init_msr_all_available);
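/*
 * Illustration (a sketch, not an exhaustive list): on a kernel built with
 * FPU, Altivec and VSX support, and hardware that has both vector
 * features, msr_all_available ends up as MSR_FP | MSR_VEC | MSR_VSX, so
 * giveup_all() and save_all() below can test every lazily-managed
 * facility with a single mask.
 */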

void giveup_all(struct task_struct *tsk)
{
	unsigned long usermsr;

	if (!tsk->thread.regs)
		return;

	usermsr = tsk->thread.regs->msr;

	if ((usermsr & msr_all_available) == 0)
		return;

	msr_check_and_set(msr_all_available);
	check_if_tm_restore_required(tsk);

	WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));

#ifdef CONFIG_PPC_FPU
	if (usermsr & MSR_FP)
		__giveup_fpu(tsk);
#endif
#ifdef CONFIG_ALTIVEC
	if (usermsr & MSR_VEC)
		__giveup_altivec(tsk);
#endif
#ifdef CONFIG_SPE
	if (usermsr & MSR_SPE)
		__giveup_spe(tsk);
#endif

	msr_check_and_clear(msr_all_available);
}
EXPORT_SYMBOL(giveup_all);

void restore_math(struct pt_regs *regs)
{
	unsigned long msr;

	if (!MSR_TM_ACTIVE(regs->msr) &&
		!current->thread.load_fp && !loadvec(current->thread))
		return;

	msr = regs->msr;
	msr_check_and_set(msr_all_available);

	/*
	 * Only reload if the bit is not set in the user MSR; the bit being
	 * set indicates that the registers are hot.
	 */
	if ((!(msr & MSR_FP)) && restore_fp(current))
		msr |= MSR_FP | current->thread.fpexc_mode;

	if ((!(msr & MSR_VEC)) && restore_altivec(current))
		msr |= MSR_VEC;

	if ((msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC) &&
			restore_vsx(current)) {
		msr |= MSR_VSX;
	}

	msr_check_and_clear(msr_all_available);

	regs->msr = msr;
}
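/*
 * Worked example (a sketch of the flow above): a thread that last used
 * FP (load_fp non-zero) but whose exit-path regs->msr has MSR_FP clear
 * gets fp_state reloaded by restore_fp(), and MSR_FP plus fpexc_mode are
 * OR-ed back into regs->msr, so userspace resumes without first taking
 * an FP-unavailable exception.
 */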

static void save_all(struct task_struct *tsk)
{
	unsigned long usermsr;

	if (!tsk->thread.regs)
		return;

	usermsr = tsk->thread.regs->msr;

	if ((usermsr & msr_all_available) == 0)
		return;

	msr_check_and_set(msr_all_available);

	WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));

	if (usermsr & MSR_FP)
		save_fpu(tsk);

	if (usermsr & MSR_VEC)
		save_altivec(tsk);

	if (usermsr & MSR_SPE)
		__giveup_spe(tsk);

	msr_check_and_clear(msr_all_available);
	thread_pkey_regs_save(&tsk->thread);
}

void flush_all_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		BUG_ON(tsk != current);
		save_all(tsk);

#ifdef CONFIG_SPE
		if (tsk->thread.regs->msr & MSR_SPE)
			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
#endif

		preempt_enable();
	}
}
EXPORT_SYMBOL(flush_all_to_thread);

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
void do_send_trap(struct pt_regs *regs, unsigned long address,
		  unsigned long error_code, int breakpt)
{
	current->thread.trap_nr = TRAP_HWBKPT;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	/* Deliver the signal to userspace */
	force_sig_ptrace_errno_trap(breakpt, /* breakpoint or watchpoint id */
				    (void __user *)address);
}
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
void do_break(struct pt_regs *regs, unsigned long address,
	      unsigned long error_code)
{
	siginfo_t info;

	current->thread.trap_nr = TRAP_HWBKPT;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	if (debugger_break_match(regs))
		return;

	/* Clear the breakpoint */
	hw_breakpoint_disable();

	/* Deliver the signal to userspace */
	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);
}
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */

static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk);

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * Set the debug registers back to their default "safe" values.
 */
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->debug.iac1 = thread->debug.iac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	thread->debug.iac3 = thread->debug.iac4 = 0;
#endif
	thread->debug.dac1 = thread->debug.dac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	thread->debug.dvc1 = thread->debug.dvc2 = 0;
#endif
	thread->debug.dbcr0 = 0;
#ifdef CONFIG_BOOKE
	/*
	 * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
	 */
	thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
			DBCR1_IAC3US | DBCR1_IAC4US;
	/*
	 * Force Data Address Compare User/Supervisor bits to be User-only
	 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
	 */
	thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#else
	thread->debug.dbcr1 = 0;
#endif
}

static void prime_debug_regs(struct debug_reg *debug)
{
	/*
	 * We could have inherited MSR_DE from userspace, since
	 * it doesn't get cleared on exception entry.  Make sure
	 * MSR_DE is clear before we enable any debug events.
	 */
	mtmsr(mfmsr() & ~MSR_DE);

	mtspr(SPRN_IAC1, debug->iac1);
	mtspr(SPRN_IAC2, debug->iac2);
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	mtspr(SPRN_IAC3, debug->iac3);
	mtspr(SPRN_IAC4, debug->iac4);
#endif
	mtspr(SPRN_DAC1, debug->dac1);
	mtspr(SPRN_DAC2, debug->dac2);
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	mtspr(SPRN_DVC1, debug->dvc1);
	mtspr(SPRN_DVC2, debug->dvc2);
#endif
	mtspr(SPRN_DBCR0, debug->dbcr0);
	mtspr(SPRN_DBCR1, debug->dbcr1);
#ifdef CONFIG_BOOKE
	mtspr(SPRN_DBCR2, debug->dbcr2);
#endif
}
/*
 * Unless neither the old nor the new thread is making use of the
 * debug registers, set the debug registers from the values
 * stored in the new thread.
 */
void switch_booke_debug_regs(struct debug_reg *new_debug)
{
	if ((current->thread.debug.dbcr0 & DBCR0_IDM)
		|| (new_debug->dbcr0 & DBCR0_IDM))
			prime_debug_regs(new_debug);
}
EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
static void set_breakpoint(struct arch_hw_breakpoint *brk)
{
	preempt_disable();
	__set_breakpoint(brk);
	preempt_enable();
}

static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->hw_brk.address = 0;
	thread->hw_brk.type = 0;
	if (ppc_breakpoint_available())
		set_breakpoint(&thread->hw_brk);
}
#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	mtspr(SPRN_DAC1, dabr);
#ifdef CONFIG_PPC_47x
	isync();
#endif
	return 0;
}
#elif defined(CONFIG_PPC_BOOK3S)
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	mtspr(SPRN_DABR, dabr);
	if (cpu_has_feature(CPU_FTR_DABRX))
		mtspr(SPRN_DABRX, dabrx);
	return 0;
}
#elif defined(CONFIG_PPC_8xx)
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	unsigned long addr = dabr & ~HW_BRK_TYPE_DABR;
	unsigned long lctrl1 = 0x90000000; /* compare type: equal on E & F */
	unsigned long lctrl2 = 0x8e000002; /* watchpoint 1 on cmp E | F */

	if ((dabr & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_READ)
		lctrl1 |= 0xa0000;
	else if ((dabr & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_WRITE)
		lctrl1 |= 0xf0000;
	else if ((dabr & HW_BRK_TYPE_RDWR) == 0)
		lctrl2 = 0;

	mtspr(SPRN_LCTRL2, 0);
	mtspr(SPRN_CMPE, addr);
	mtspr(SPRN_CMPF, addr + 4);
	mtspr(SPRN_LCTRL1, lctrl1);
	mtspr(SPRN_LCTRL2, lctrl2);

	return 0;
}
#else
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	return -EINVAL;
}
#endif

static inline int set_dabr(struct arch_hw_breakpoint *brk)
{
	unsigned long dabr, dabrx;

	dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR);
	dabrx = ((brk->type >> 3) & 0x7);

	if (ppc_md.set_dabr)
		return ppc_md.set_dabr(dabr, dabrx);

	return __set_dabr(dabr, dabrx);
}

static inline int set_dawr(struct arch_hw_breakpoint *brk)
{
	unsigned long dawr, dawrx, mrd;

	dawr = brk->address;

	dawrx  = (brk->type & (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE)) \
			   << (63 - 58); /* read/write bits */
	dawrx |= ((brk->type & (HW_BRK_TYPE_TRANSLATE)) >> 2) \
			   << (63 - 59); /* translate */
	dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL)) \
			   >> 3; /* PRIM bits */
	/*
	 * DAWR length is stored in field MDR bits 48:53.  Matches range in
	 * doublewords (64 bits) biased by -1, e.g. 0b000000=1DW and
	 * 0b111111=64DW.
	 * brk->len is in bytes.
	 * This aligns up to double word size, shifts and does the bias.
	 */
	mrd = ((brk->len + 7) >> 3) - 1;
	dawrx |= (mrd & 0x3f) << (63 - 53);
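	/*
	 * Worked example (illustrative): for an 8-byte watchpoint,
	 * brk->len = 8, so mrd = ((8 + 7) >> 3) - 1 = 0, i.e. a match
	 * range of one doubleword, exactly the bias described above.
	 */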

	if (ppc_md.set_dawr)
		return ppc_md.set_dawr(dawr, dawrx);
	mtspr(SPRN_DAWR, dawr);
	mtspr(SPRN_DAWRX, dawrx);
	return 0;
}

void __set_breakpoint(struct arch_hw_breakpoint *brk)
{
	memcpy(this_cpu_ptr(&current_brk), brk, sizeof(*brk));

	if (cpu_has_feature(CPU_FTR_DAWR))
		// Power8 or later
		set_dawr(brk);
	else if (!cpu_has_feature(CPU_FTR_ARCH_207S))
		// Power7 or earlier
		set_dabr(brk);
	else
		// Shouldn't happen due to higher level checks
		WARN_ON_ONCE(1);
}

/* Check if we have DAWR or DABR hardware */
bool ppc_breakpoint_available(void)
{
	if (cpu_has_feature(CPU_FTR_DAWR))
		return true; /* POWER8 DAWR */
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		return false; /* POWER9 with DAWR disabled */
	/* DABR: Everything but POWER8 and POWER9 */
	return true;
}
EXPORT_SYMBOL_GPL(ppc_breakpoint_available);
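/*
 * Summary of the checks above (illustrative):
 *
 *	CPU_FTR_DAWR	CPU_FTR_ARCH_207S	result
 *	set		(any)			true  (DAWR, POWER8 or later)
 *	clear		set			false (POWER9, DAWR disabled)
 *	clear		clear			true  (DABR, pre-POWER8)
 */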

static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
				struct arch_hw_breakpoint *b)
{
	if (a->address != b->address)
		return false;
	if (a->type != b->type)
		return false;
	if (a->len != b->len)
		return false;
	return true;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM

static inline bool tm_enabled(struct task_struct *tsk)
{
	return tsk && tsk->thread.regs && (tsk->thread.regs->msr & MSR_TM);
}

static void tm_reclaim_thread(struct thread_struct *thr, uint8_t cause)
{
	/*
	 * Use the current MSR TM suspended bit to track if we have
	 * checkpointed state outstanding.
	 * On signal delivery, we'd normally reclaim the checkpointed
	 * state to obtain stack pointer (see: get_tm_stackpointer()).
	 * This will then directly return to userspace without going
	 * through __switch_to(). However, if the stack frame is bad,
	 * we need to exit this thread which calls __switch_to() which
	 * will again attempt to reclaim the already saved tm state.
	 * Hence we need to check that we've not already reclaimed
	 * this state.
	 * We do this using the current MSR, rather than tracking it in
	 * some specific thread_struct bit, as it has the additional
	 * benefit of checking for a potential TM bad thing exception.
	 */
	if (!MSR_TM_SUSPENDED(mfmsr()))
		return;

	giveup_all(container_of(thr, struct task_struct, thread));

	tm_reclaim(thr, cause);

	/*
	 * If we are in a transaction and FP is off then we can't have
	 * used FP inside that transaction. Hence the checkpointed
	 * state is the same as the live state. We need to copy the
	 * live state to the checkpointed state so that when the
	 * transaction is restored, the checkpointed state is correct
	 * and the aborted transaction sees the correct state. We use
	 * ckpt_regs.msr here as that's what tm_reclaim will use to
	 * determine if it's going to write the checkpointed state or
	 * not. So either this will write the checkpointed registers,
	 * or reclaim will. Similarly for VMX.
	 */
	if ((thr->ckpt_regs.msr & MSR_FP) == 0)
		memcpy(&thr->ckfp_state, &thr->fp_state,
		       sizeof(struct thread_fp_state));
	if ((thr->ckpt_regs.msr & MSR_VEC) == 0)
		memcpy(&thr->ckvr_state, &thr->vr_state,
		       sizeof(struct thread_vr_state));
}

void tm_reclaim_current(uint8_t cause)
{
	tm_enable();
	tm_reclaim_thread(&current->thread, cause);
}

static inline void tm_reclaim_task(struct task_struct *tsk)
{
	/* We have to work out if we're switching from/to a task that's in the
	 * middle of a transaction.
	 *
	 * In switching we need to maintain a 2nd register state as
	 * oldtask->thread.ckpt_regs. We tm_reclaim(oldproc); this saves the
	 * checkpointed (tbegin) state in ckpt_regs, ckfp_state and
	 * ckvr_state
	 *
	 * We also context switch (save) TFHAR/TEXASR/TFIAR in here.
	 */
	struct thread_struct *thr = &tsk->thread;

	if (!thr->regs)
		return;

	if (!MSR_TM_ACTIVE(thr->regs->msr))
		goto out_and_saveregs;

	WARN_ON(tm_suspend_disabled);

	TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
		 "ccr=%lx, msr=%lx, trap=%lx)\n",
		 tsk->pid, thr->regs->nip,
		 thr->regs->ccr, thr->regs->msr,
		 thr->regs->trap);

	tm_reclaim_thread(thr, TM_CAUSE_RESCHED);

	TM_DEBUG("--- tm_reclaim on pid %d complete\n",
		 tsk->pid);

out_and_saveregs:
	/* Always save the regs here, even if a transaction's not active.
	 * This context-switches a thread's TM info SPRs. We do it here to
	 * be consistent with the restore path (in recheckpoint) which
	 * cannot happen later in _switch().
	 */
	tm_save_sprs(thr);
}

extern void __tm_recheckpoint(struct thread_struct *thread);

void tm_recheckpoint(struct thread_struct *thread)
{
	unsigned long flags;

	if (!(thread->regs->msr & MSR_TM))
		return;

	/* We really can't be interrupted here as the TEXASR registers can't
	 * change and later in the trecheckpoint code, we have a userspace R1.
	 * So let's hard disable over this region.
	 */
	local_irq_save(flags);
	hard_irq_disable();

	/* The TM SPRs are restored here, so that TEXASR.FS can be set
	 * before the trecheckpoint and no explosion occurs.
	 */
	tm_restore_sprs(thread);

	__tm_recheckpoint(thread);

	local_irq_restore(flags);
}

static inline void tm_recheckpoint_new_task(struct task_struct *new)
{
	if (!cpu_has_feature(CPU_FTR_TM))
		return;

	/* Recheckpoint the registers of the thread we're about to switch to.
	 *
	 * If the task was using FP, we non-lazily reload both the original and
	 * the speculative FP register states. This is because the kernel
	 * doesn't see if/when a TM rollback occurs, so if we take an FP
	 * unavailable later, we are unable to determine which set of FP regs
	 * need to be restored.
	 */
	if (!tm_enabled(new))
		return;

	if (!MSR_TM_ACTIVE(new->thread.regs->msr)) {
		tm_restore_sprs(&new->thread);
		return;
	}
	/* Recheckpoint to restore original checkpointed register state. */
	TM_DEBUG("*** tm_recheckpoint of pid %d (new->msr 0x%lx)\n",
		 new->pid, new->thread.regs->msr);

	tm_recheckpoint(&new->thread);

	/*
	 * The checkpointed state has been restored but the live state has
	 * not, ensure all the math functionality is turned off to trigger
	 * restore_math() to reload.
	 */
	new->thread.regs->msr &= ~(MSR_FP | MSR_VEC | MSR_VSX);

	TM_DEBUG("*** tm_recheckpoint of pid %d complete "
		 "(kernel msr 0x%lx)\n",
		 new->pid, mfmsr());
}

static inline void __switch_to_tm(struct task_struct *prev,
		struct task_struct *new)
{
	if (cpu_has_feature(CPU_FTR_TM)) {
		if (tm_enabled(prev) || tm_enabled(new))
			tm_enable();

		if (tm_enabled(prev)) {
			prev->thread.load_tm++;
			tm_reclaim_task(prev);
			if (!MSR_TM_ACTIVE(prev->thread.regs->msr) && prev->thread.load_tm == 0)
				prev->thread.regs->msr &= ~MSR_TM;
		}

		tm_recheckpoint_new_task(new);
	}
}

/*
 * This is called if we are on the way out to userspace and the
 * TIF_RESTORE_TM flag is set.  It checks if we need to reload
 * FP and/or vector state and does so if necessary.
 * If userspace is inside a transaction (whether active or
 * suspended) and FP/VMX/VSX instructions have ever been enabled
 * inside that transaction, then we have to keep them enabled
 * and keep the FP/VMX/VSX state loaded while ever the transaction
 * continues.  The reason is that if we didn't, and subsequently
 * got a FP/VMX/VSX unavailable interrupt inside a transaction,
 * we don't know whether it's the same transaction, and thus we
 * don't know which of the checkpointed state and the transactional
 * state to use.
 */
void restore_tm_state(struct pt_regs *regs)
{
	unsigned long msr_diff;

	/*
	 * This is the only moment we should clear TIF_RESTORE_TM as
	 * it is here that ckpt_regs.msr and pt_regs.msr become the same
	 * again, anything else could lead to an incorrect ckpt_msr being
	 * saved and therefore incorrect signal contexts.
	 */
	clear_thread_flag(TIF_RESTORE_TM);
	if (!MSR_TM_ACTIVE(regs->msr))
		return;

	msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
	msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;

	/* Ensure that restore_math() will restore */
	if (msr_diff & MSR_FP)
		current->thread.load_fp = 1;
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC) && msr_diff & MSR_VEC)
		current->thread.load_vec = 1;
#endif
	restore_math(regs);

	regs->msr |= msr_diff;
}
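/*
 * Example (a sketch of the path above): if FP was enabled at tbegin
 * (ckpt_regs.msr has MSR_FP) but the exit-path regs->msr does not,
 * msr_diff contains MSR_FP; load_fp is forced on so restore_math()
 * reloads the state, and MSR_FP is OR-ed back into regs->msr before
 * returning to the transaction.
 */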

#else
#define tm_recheckpoint_new_task(new)
#define __switch_to_tm(prev, new)
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

static inline void save_sprs(struct thread_struct *t)
{
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		t->vrsave = mfspr(SPRN_VRSAVE);
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	if (cpu_has_feature(CPU_FTR_DSCR))
		t->dscr = mfspr(SPRN_DSCR);

	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		t->bescr = mfspr(SPRN_BESCR);
		t->ebbhr = mfspr(SPRN_EBBHR);
		t->ebbrr = mfspr(SPRN_EBBRR);

		t->fscr = mfspr(SPRN_FSCR);

		/*
		 * Note that the TAR is not available for use in the kernel.
		 * (To provide this, the TAR should be backed up/restored on
		 * exception entry/exit instead, and be in pt_regs.  FIXME,
		 * this should be in pt_regs anyway (for debug).)
		 */
		t->tar = mfspr(SPRN_TAR);
	}
#endif

	thread_pkey_regs_save(t);
}

static inline void restore_sprs(struct thread_struct *old_thread,
				struct thread_struct *new_thread)
{
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
	    old_thread->vrsave != new_thread->vrsave)
		mtspr(SPRN_VRSAVE, new_thread->vrsave);
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	if (cpu_has_feature(CPU_FTR_DSCR)) {
		u64 dscr = get_paca()->dscr_default;
		if (new_thread->dscr_inherit)
			dscr = new_thread->dscr;

		if (old_thread->dscr != dscr)
			mtspr(SPRN_DSCR, dscr);
	}

	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		if (old_thread->bescr != new_thread->bescr)
			mtspr(SPRN_BESCR, new_thread->bescr);
		if (old_thread->ebbhr != new_thread->ebbhr)
			mtspr(SPRN_EBBHR, new_thread->ebbhr);
		if (old_thread->ebbrr != new_thread->ebbrr)
			mtspr(SPRN_EBBRR, new_thread->ebbrr);

		if (old_thread->fscr != new_thread->fscr)
			mtspr(SPRN_FSCR, new_thread->fscr);

		if (old_thread->tar != new_thread->tar)
			mtspr(SPRN_TAR, new_thread->tar);
	}

	if (cpu_has_feature(CPU_FTR_P9_TIDR) &&
	    old_thread->tidr != new_thread->tidr)
		mtspr(SPRN_TIDR, new_thread->tidr);
#endif

	thread_pkey_regs_restore(new_thread, old_thread);
}
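/*
 * Design note (an assumption from the structure above): restore_sprs()
 * compares the old and new values before each mtspr, presumably because
 * SPR moves are comparatively slow; a register is only rewritten when
 * the two threads actually differ.
 */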
1159
Nicholas Piggin07d2a622017-06-09 01:36:09 +10001160#ifdef CONFIG_PPC_BOOK3S_64
1161#define CP_SIZE 128
1162static const u8 dummy_copy_buffer[CP_SIZE] __attribute__((aligned(CP_SIZE)));
1163#endif
1164
Paul Mackerras14cf11a2005-09-26 16:04:21 +10001165struct task_struct *__switch_to(struct task_struct *prev,
1166 struct task_struct *new)
1167{
1168 struct thread_struct *new_thread, *old_thread;
Paul Mackerras14cf11a2005-09-26 16:04:21 +10001169 struct task_struct *last;
Peter Zijlstrad6bf29b2011-05-24 17:11:48 -07001170#ifdef CONFIG_PPC_BOOK3S_64
1171 struct ppc64_tlb_batch *batch;
1172#endif
Paul Mackerras14cf11a2005-09-26 16:04:21 +10001173
Anton Blanchard152d5232015-10-29 11:43:55 +11001174 new_thread = &new->thread;
1175 old_thread = &current->thread;
1176
Michael Neuling7ba5fef2013-10-02 17:15:14 +10001177 WARN_ON(!irqs_disabled());
1178
Michael Ellerman4e003742017-10-19 15:08:43 +11001179#ifdef CONFIG_PPC_BOOK3S_64
Christoph Lameter69111ba2014-10-21 15:23:25 -05001180 batch = this_cpu_ptr(&ppc64_tlb_batch);
Peter Zijlstrad6bf29b2011-05-24 17:11:48 -07001181 if (batch->active) {
1182 current_thread_info()->local_flags |= _TLF_LAZY_MMU;
1183 if (batch->index)
1184 __flush_tlb_pending(batch);
1185 batch->active = 0;
1186 }
Michael Ellerman4e003742017-10-19 15:08:43 +11001187#endif /* CONFIG_PPC_BOOK3S_64 */
Paul Mackerras06d67d52005-10-10 22:29:05 +10001188
Anton Blanchardf3d885c2015-10-29 11:44:10 +11001189#ifdef CONFIG_PPC_ADV_DEBUG_REGS
1190 switch_booke_debug_regs(&new->thread.debug);
1191#else
1192/*
1193 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
1194 * schedule DABR
1195 */
1196#ifndef CONFIG_HAVE_HW_BREAKPOINT
1197 if (unlikely(!hw_brk_match(this_cpu_ptr(&current_brk), &new->thread.hw_brk)))
1198 __set_breakpoint(&new->thread.hw_brk);
1199#endif /* CONFIG_HAVE_HW_BREAKPOINT */
1200#endif
1201
1202 /*
1203 * We need to save SPRs before treclaim/trecheckpoint as these will
1204 * change a number of them.
1205 */
1206 save_sprs(&prev->thread);
1207
Anton Blanchardf3d885c2015-10-29 11:44:10 +11001208 /* Save FPU, Altivec, VSX and SPE state */
1209 giveup_all(prev);
1210
Cyril Burdc310662016-09-23 16:18:24 +10001211 __switch_to_tm(prev, new);
1212
Nicholas Piggine4c0fc52017-06-09 01:36:06 +10001213 if (!radix_enabled()) {
1214 /*
1215 * We can't take a PMU exception inside _switch() since there
1216 * is a window where the kernel stack SLB and the kernel stack
1217 * are out of sync. Hard disable here.
1218 */
1219 hard_irq_disable();
1220 }
Michael Neulingbc2a9402013-02-13 16:21:40 +00001221
Anton Blanchard20dbe672015-12-10 20:44:39 +11001222 /*
1223 * Call restore_sprs() before calling _switch(). If we move it after
1224 * _switch() then we miss out on calling it for new tasks. The reason
1225 * for this is we manually create a stack frame for new tasks that
1226 * directly returns through ret_from_fork() or
1227 * ret_from_kernel_thread(). See copy_thread() for details.
1228 */
Anton Blanchardf3d885c2015-10-29 11:44:10 +11001229 restore_sprs(old_thread, new_thread);
1230
Anton Blanchard20dbe672015-12-10 20:44:39 +11001231 last = _switch(old_thread, new_thread);
1232
Michael Ellerman4e003742017-10-19 15:08:43 +11001233#ifdef CONFIG_PPC_BOOK3S_64
Peter Zijlstrad6bf29b2011-05-24 17:11:48 -07001234 if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
1235 current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
Christoph Lameter69111ba2014-10-21 15:23:25 -05001236 batch = this_cpu_ptr(&ppc64_tlb_batch);
Peter Zijlstrad6bf29b2011-05-24 17:11:48 -07001237 batch->active = 1;
1238 }
Cyril Bur70fe3d92016-02-29 17:53:47 +11001239
Nicholas Piggin07d2a622017-06-09 01:36:09 +10001240 if (current_thread_info()->task->thread.regs) {
Cyril Bur70fe3d92016-02-29 17:53:47 +11001241 restore_math(current_thread_info()->task->thread.regs);
Nicholas Piggin07d2a622017-06-09 01:36:09 +10001242
1243 /*
1244 * The copy-paste buffer can only store into foreign real
1245 * addresses, so unprivileged processes can not see the
1246 * data or use it in any way unless they have foreign real
Sukadev Bhattiprolu9d2a4d72017-11-07 18:23:54 -08001247 * mappings. If the new process has the foreign real address
1248 * mappings, we must issue a cp_abort to clear any state and
1249 * prevent snooping, corruption or a covert channel.
Nicholas Piggin07d2a622017-06-09 01:36:09 +10001250 */
Nicholas Piggin2bf10712018-07-05 18:47:00 +10001251 if (current_thread_info()->task->thread.used_vas)
Sukadev Bhattiprolu9d2a4d72017-11-07 18:23:54 -08001252 asm volatile(PPC_CP_ABORT);
Nicholas Piggin07d2a622017-06-09 01:36:09 +10001253 }
Michael Ellerman4e003742017-10-19 15:08:43 +11001254#endif /* CONFIG_PPC_BOOK3S_64 */
Peter Zijlstrad6bf29b2011-05-24 17:11:48 -07001255
Paul Mackerras14cf11a2005-09-26 16:04:21 +10001256 return last;
1257}
1258
Paul Mackerras06d67d52005-10-10 22:29:05 +10001259static int instructions_to_print = 16;
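/*
 * With instructions_to_print = 16, the dumps below start 16 * 3 / 4 = 12
 * instructions before the NIP, so each dump shows twelve instructions
 * before the faulting one and three after it.
 */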

static void show_instructions(struct pt_regs *regs)
{
	int i;
	unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
			sizeof(int));

	printk("Instruction dump:");

	for (i = 0; i < instructions_to_print; i++) {
		int instr;

		if (!(i % 8))
			pr_cont("\n");

#if !defined(CONFIG_BOOKE)
		/* If executing with the IMMU off, adjust pc rather
		 * than print XXXXXXXX.
		 */
		if (!(regs->msr & MSR_IR))
			pc = (unsigned long)phys_to_virt(pc);
#endif

		if (!__kernel_text_address(pc) ||
		    probe_kernel_address((unsigned int __user *)pc, instr)) {
			pr_cont("XXXXXXXX ");
		} else {
			if (regs->nip == pc)
				pr_cont("<%08x> ", instr);
			else
				pr_cont("%08x ", instr);
		}

		pc += sizeof(int);
	}

	pr_cont("\n");
}

void show_user_instructions(struct pt_regs *regs)
{
	unsigned long pc;
	int i;

	pc = regs->nip - (instructions_to_print * 3 / 4 * sizeof(int));

	/*
	 * Make sure the NIP points at userspace, not kernel text/data or
	 * elsewhere.
	 */
	if (!__access_ok(pc, instructions_to_print * sizeof(int), USER_DS)) {
		pr_info("%s[%d]: Bad NIP, not dumping instructions.\n",
			current->comm, current->pid);
		return;
	}

	pr_info("%s[%d]: code: ", current->comm, current->pid);

	for (i = 0; i < instructions_to_print; i++) {
		int instr;

		if (!(i % 8) && (i > 0)) {
			pr_cont("\n");
			pr_info("%s[%d]: code: ", current->comm, current->pid);
		}

		if (probe_kernel_address((unsigned int __user *)pc, instr)) {
			pr_cont("XXXXXXXX ");
		} else {
			if (regs->nip == pc)
				pr_cont("<%08x> ", instr);
			else
				pr_cont("%08x ", instr);
		}

		pc += sizeof(int);
	}

	pr_cont("\n");
}

struct regbit {
	unsigned long bit;
	const char *name;
};

static struct regbit msr_bits[] = {
#if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
	{MSR_SF,	"SF"},
	{MSR_HV,	"HV"},
#endif
	{MSR_VEC,	"VEC"},
	{MSR_VSX,	"VSX"},
#ifdef CONFIG_BOOKE
	{MSR_CE,	"CE"},
#endif
	{MSR_EE,	"EE"},
	{MSR_PR,	"PR"},
	{MSR_FP,	"FP"},
	{MSR_ME,	"ME"},
#ifdef CONFIG_BOOKE
	{MSR_DE,	"DE"},
#else
	{MSR_SE,	"SE"},
	{MSR_BE,	"BE"},
#endif
	{MSR_IR,	"IR"},
	{MSR_DR,	"DR"},
	{MSR_PMM,	"PMM"},
#ifndef CONFIG_BOOKE
	{MSR_RI,	"RI"},
	{MSR_LE,	"LE"},
#endif
	{0,		NULL}
};

static void print_bits(unsigned long val, struct regbit *bits, const char *sep)
{
	const char *s = "";

	for (; bits->bit; ++bits)
1381 if (val & bits->bit) {
Michael Ellermandb5ba5a2016-11-02 22:20:47 +11001382 pr_cont("%s%s", s, bits->name);
Michael Neuling801c0b22015-11-20 15:15:32 +11001383 s = sep;
Paul Mackerras06d67d52005-10-10 22:29:05 +10001384 }
Michael Neuling801c0b22015-11-20 15:15:32 +11001385}
1386
1387#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1388static struct regbit msr_tm_bits[] = {
1389 {MSR_TS_T, "T"},
1390 {MSR_TS_S, "S"},
1391 {MSR_TM, "E"},
1392 {0, NULL}
1393};
1394
1395static void print_tm_bits(unsigned long val)
1396{
1397/*
1398 * This only prints something if at least one of the TM bits is set.
1399 * Inside the TM[], the output means:
1400 * E: Enabled (bit 32)
1401 * S: Suspended (bit 33)
1402 * T: Transactional (bit 34)
1403 */
1404 if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) {
Michael Ellermandb5ba5a2016-11-02 22:20:47 +11001405 pr_cont(",TM[");
Michael Neuling801c0b22015-11-20 15:15:32 +11001406 print_bits(val, msr_tm_bits, "");
Michael Ellermandb5ba5a2016-11-02 22:20:47 +11001407 pr_cont("]");
Michael Neuling801c0b22015-11-20 15:15:32 +11001408 }
1409}
1410#else
1411static void print_tm_bits(unsigned long val) {}
1412#endif
1413
1414static void print_msr_bits(unsigned long val)
1415{
Michael Ellermandb5ba5a2016-11-02 22:20:47 +11001416 pr_cont("<");
Michael Neuling801c0b22015-11-20 15:15:32 +11001417 print_bits(val, msr_bits, ",");
1418 print_tm_bits(val);
Michael Ellermandb5ba5a2016-11-02 22:20:47 +11001419 pr_cont(">");
Paul Mackerras06d67d52005-10-10 22:29:05 +10001420}
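/*
 * Illustrative output: a typical 64-bit kernel-mode MSR might print as
 * <SF,HV,EE,ME,IR,DR,RI,LE>; the exact set of names depends on which
 * MSR bits are set.
 */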
1421
1422#ifdef CONFIG_PPC64
anton@samba.orgf6f7dde2007-03-20 20:38:19 -05001423#define REG "%016lx"
Paul Mackerras06d67d52005-10-10 22:29:05 +10001424#define REGS_PER_LINE 4
1425#define LAST_VOLATILE 13
1426#else
anton@samba.orgf6f7dde2007-03-20 20:38:19 -05001427#define REG "%08lx"
Paul Mackerras06d67d52005-10-10 22:29:05 +10001428#define REGS_PER_LINE 8
1429#define LAST_VOLATILE 12
1430#endif
1431
Paul Mackerras14cf11a2005-09-26 16:04:21 +10001432void show_regs(struct pt_regs * regs)
1433{
1434 int i, trap;
1435
Tejun Heoa43cb952013-04-30 15:27:17 -07001436 show_regs_print_info(KERN_DEFAULT);
1437
Michael Ellermana6036102017-08-23 23:56:24 +10001438 printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
Paul Mackerras06d67d52005-10-10 22:29:05 +10001439 regs->nip, regs->link, regs->ctr);
Michael Ellerman182dc9c2017-12-18 16:33:36 +11001440 printk("REGS: %px TRAP: %04lx %s (%s)\n",
Serge E. Hallyn96b644b2006-10-02 02:18:13 -07001441 regs, regs->trap, print_tainted(), init_utsname()->release);
Michael Ellermana6036102017-08-23 23:56:24 +10001442 printk("MSR: "REG" ", regs->msr);
Michael Neuling801c0b22015-11-20 15:15:32 +11001443 print_msr_bits(regs->msr);
Michael Ellermanf6fc73f2017-08-23 23:56:23 +10001444 pr_cont(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
Paul Mackerras14cf11a2005-09-26 16:04:21 +10001445 trap = TRAP(regs);
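	/*
	 * For non-syscall traps the exception entry code saves CFAR in
	 * the otherwise-unused orig_gpr3 slot of pt_regs, which is why
	 * it is printed from there below.
	 */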
Benjamin Herrenschmidt2271db22018-01-12 13:28:49 +11001446 if ((TRAP(regs) != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
Michael Ellerman7dae8652016-11-03 20:45:26 +11001447 pr_cont("CFAR: "REG" ", regs->orig_gpr3);
Anton Blanchardc5400642013-11-15 15:41:19 +11001448 if (trap == 0x200 || trap == 0x300 || trap == 0x600)
Kumar Galaba28c9a2011-10-06 02:53:38 +00001449#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
Michael Ellerman7dae8652016-11-03 20:45:26 +11001450 pr_cont("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr);
Kumar Gala14170782007-07-26 00:46:15 -05001451#else
Michael Ellerman7dae8652016-11-03 20:45:26 +11001452 pr_cont("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
Anton Blanchard9db8bcf2013-11-15 15:48:38 +11001453#endif
1454#ifdef CONFIG_PPC64
Nicholas Piggin3130a7b2018-05-10 11:04:24 +10001455 pr_cont("IRQMASK: %lx ", regs->softe);
Anton Blanchard9db8bcf2013-11-15 15:48:38 +11001456#endif
1457#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
Anton Blanchard6d888d12013-11-18 13:19:17 +11001458 if (MSR_TM_ACTIVE(regs->msr))
Michael Ellerman7dae8652016-11-03 20:45:26 +11001459 pr_cont("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
Kumar Gala14170782007-07-26 00:46:15 -05001460#endif
Paul Mackerras14cf11a2005-09-26 16:04:21 +10001461
1462 for (i = 0; i < 32; i++) {
Paul Mackerras06d67d52005-10-10 22:29:05 +10001463 if ((i % REGS_PER_LINE) == 0)
Michael Ellerman7dae8652016-11-03 20:45:26 +11001464 pr_cont("\nGPR%02d: ", i);
1465 pr_cont(REG " ", regs->gpr[i]);
Paul Mackerras06d67d52005-10-10 22:29:05 +10001466 if (i == LAST_VOLATILE && !FULL_REGS(regs))
Paul Mackerras14cf11a2005-09-26 16:04:21 +10001467 break;
1468 }
Michael Ellerman7dae8652016-11-03 20:45:26 +11001469 pr_cont("\n");
Paul Mackerras14cf11a2005-09-26 16:04:21 +10001470#ifdef CONFIG_KALLSYMS
1471 /*
1472 * Look up NIP late so we have the best chance of getting the
1473 * above info out without failing
1474 */
Benjamin Herrenschmidt058c78f2008-07-07 13:44:31 +10001475 printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
1476 printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
Paul Mackerras14cf11a2005-09-26 16:04:21 +10001477#endif
1478 show_stack(current, (unsigned long *) regs->gpr[1]);
Paul Mackerras06d67d52005-10-10 22:29:05 +10001479 if (!user_mode(regs))
1480 show_instructions(regs);
Paul Mackerras14cf11a2005-09-26 16:04:21 +10001481}
1482
Paul Mackerras14cf11a2005-09-26 16:04:21 +10001483void flush_thread(void)
1484{
K.Prasade0780b72011-02-10 04:44:35 +00001485#ifdef CONFIG_HAVE_HW_BREAKPOINT
K.Prasad5aae8a52010-06-15 11:35:19 +05301486 flush_ptrace_hw_breakpoint(current);
K.Prasade0780b72011-02-10 04:44:35 +00001487#else /* CONFIG_HAVE_HW_BREAKPOINT */
Dave Kleikamp3bffb652010-02-08 11:51:18 +00001488 set_debug_reg_defaults(&current->thread);
K.Prasade0780b72011-02-10 04:44:35 +00001489#endif /* CONFIG_HAVE_HW_BREAKPOINT */
Paul Mackerras14cf11a2005-09-26 16:04:21 +10001490}
1491
Sukadev Bhattiprolu9d2a4d72017-11-07 18:23:54 -08001492int set_thread_uses_vas(void)
1493{
1494#ifdef CONFIG_PPC_BOOK3S_64
1495 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1496 return -EINVAL;
1497
1498 current->thread.used_vas = 1;
1499
1500 /*
1501 * Even a process that has no foreign real address mapping can use
1502 * an unpaired COPY instruction (to no real effect). Issue CP_ABORT
1503 * to clear any pending COPY and prevent a covert channel.
1504 *
1505 * __switch_to() will issue CP_ABORT on future context switches.
1506 */
1507 asm volatile(PPC_CP_ABORT);
1508
1509#endif /* CONFIG_PPC_BOOK3S_64 */
1510 return 0;
1511}
1512
Sukadev Bhattiproluec233ed2017-11-07 18:23:53 -08001513#ifdef CONFIG_PPC64
Alastair D'Silva71cc64a2018-05-11 16:12:59 +10001514/**
1515 * Assign a TIDR (thread ID) for task @t and set it in the thread
Sukadev Bhattiproluec233ed2017-11-07 18:23:53 -08001516 * structure. For now, we only support setting TIDR for the 'current' task.
Alastair D'Silva71cc64a2018-05-11 16:12:59 +10001517 *
1518 * Since the TID value is a truncated form of its PID, it is possible
1519 * (but unlikely) for 2 threads to have the same TID. In the unlikely event
1520 * that 2 threads share the same TID and are waiting, one of the following
1521 * cases will happen:
1522 *
1523 * 1. The correct thread is running, the wrong thread is not
1524 * In this situation, the correct thread is woken and proceeds to pass its
1525 * condition check.
1526 *
1527 * 2. Neither thread is running
1528 * In this situation, neither thread will be woken. When scheduled, the waiting
1529 * threads will execute either a wait, which will return immediately, followed
1530 * by a condition check, which will pass for the correct thread and fail
1531 * for the wrong thread, or they will execute the condition check immediately.
1532 *
1533 * 3. The wrong thread is running, the correct thread is not
1534 * The wrong thread will be woken, but will fail its condition check and
1535 * re-execute wait. The correct thread, when scheduled, will execute either
1536 * its condition check (which will pass), or wait, which returns immediately
1537 * when called the first time after the thread is scheduled, followed by its
1538 * condition check (which will pass).
1539 *
1540 * 4. Both threads are running
1541 * Both threads will be woken. The wrong thread will fail its condition check
1542 * and execute another wait, while the correct thread will pass its condition
1543 * check.
1544 *
1545 * @t: the task to set the thread ID for
Sukadev Bhattiproluec233ed2017-11-07 18:23:53 -08001546 */
1547int set_thread_tidr(struct task_struct *t)
1548{
Alastair D'Silva3449f192018-05-11 16:12:58 +10001549 if (!cpu_has_feature(CPU_FTR_P9_TIDR))
Sukadev Bhattiproluec233ed2017-11-07 18:23:53 -08001550 return -EINVAL;
1551
1552 if (t != current)
1553 return -EINVAL;
1554
Vaibhav Jain7e4d4232017-11-24 14:03:38 +05301555 if (t->thread.tidr)
1556 return 0;
1557
Alastair D'Silva71cc64a2018-05-11 16:12:59 +10001558 t->thread.tidr = (u16)task_pid_nr(t);
Sukadev Bhattiproluec233ed2017-11-07 18:23:53 -08001559 mtspr(SPRN_TIDR, t->thread.tidr);
1560
1561 return 0;
1562}
Christophe Lombardb1db5512018-01-11 09:55:25 +01001563EXPORT_SYMBOL_GPL(set_thread_tidr);
Sukadev Bhattiproluec233ed2017-11-07 18:23:53 -08001564
1565#endif /* CONFIG_PPC64 */
1566
Paul Mackerras14cf11a2005-09-26 16:04:21 +10001567void
1568release_thread(struct task_struct *t)
1569{
1570}
1571
1572/*
Suresh Siddha55ccf3f2012-05-16 15:03:51 -07001573 * This gets called so that we can store coprocessor state into memory and
1574 * copy the current task into the new thread.
Paul Mackerras14cf11a2005-09-26 16:04:21 +10001575 */
Suresh Siddha55ccf3f2012-05-16 15:03:51 -07001576int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
Paul Mackerras14cf11a2005-09-26 16:04:21 +10001577{
Anton Blanchard579e6332015-10-29 11:44:09 +11001578 flush_all_to_thread(src);
Michael Neuling621b5062014-03-03 14:21:40 +11001579 /*
1580 * Flush TM state out so we can copy it. __switch_to_tm() does this
1581 * flush but it removes the checkpointed state from the current CPU and
1582 * transitions the CPU out of TM mode. Hence we need to call
1583 * tm_recheckpoint_new_task() (on the same task) to restore the
1584 * checkpointed state back and the TM mode.
Cyril Bur5d176f72016-09-14 18:02:16 +10001585 *
1586 * Can't pass dst because it isn't ready. Doesn't matter, passing
1587 * dst is only important for __switch_to()
Michael Neuling621b5062014-03-03 14:21:40 +11001588 */
Cyril Burdc310662016-09-23 16:18:24 +10001589 __switch_to_tm(src, src);
Michael Ellerman330a1eb2013-06-28 18:15:16 +10001590
Suresh Siddha55ccf3f2012-05-16 15:03:51 -07001591 *dst = *src;
Michael Ellerman330a1eb2013-06-28 18:15:16 +10001592
1593 clear_task_ebb(dst);
1594
Suresh Siddha55ccf3f2012-05-16 15:03:51 -07001595 return 0;
Paul Mackerras14cf11a2005-09-26 16:04:21 +10001596}
1597
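/*
 * Pre-compute the VSID for the new task's kernel stack, so the stack
 * SLB entry can be installed cheaply in _switch(). This is only needed
 * for the hash MMU; radix has no SLBs, hence the early return below.
 */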
Michael Ellermancec15482014-07-10 12:29:21 +10001598static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
1599{
Michael Ellerman4e003742017-10-19 15:08:43 +11001600#ifdef CONFIG_PPC_BOOK3S_64
Michael Ellermancec15482014-07-10 12:29:21 +10001601 unsigned long sp_vsid;
1602 unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
1603
Aneesh Kumar K.Vcaca2852016-04-29 23:26:07 +10001604 if (radix_enabled())
1605 return;
1606
Michael Ellermancec15482014-07-10 12:29:21 +10001607 if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
1608 sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
1609 << SLB_VSID_SHIFT_1T;
1610 else
1611 sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
1612 << SLB_VSID_SHIFT;
1613 sp_vsid |= SLB_VSID_KERNEL | llp;
1614 p->thread.ksp_vsid = sp_vsid;
1615#endif
1616}
1617
Paul Mackerras14cf11a2005-09-26 16:04:21 +10001618/*
1619 * Copy a thread.
1620 */
Alexey Kardashevskiyefcac652011-03-02 15:18:48 +00001621
Alex Dowad6eca8932015-03-13 20:14:46 +02001622/*
1623 * Copy architecture-specific thread state
1624 */
Alexey Dobriyan6f2c55b2009-04-02 16:56:59 -07001625int copy_thread(unsigned long clone_flags, unsigned long usp,
Alex Dowad6eca8932015-03-13 20:14:46 +02001626 unsigned long kthread_arg, struct task_struct *p)
Paul Mackerras14cf11a2005-09-26 16:04:21 +10001627{
1628 struct pt_regs *childregs, *kregs;
1629 extern void ret_from_fork(void);
Al Viro58254e12012-09-12 18:32:42 -04001630 extern void ret_from_kernel_thread(void);
1631 void (*f)(void);
Al Viro0cec6fd2006-01-12 01:06:02 -08001632 unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
Michael Ellerman5d31a962016-03-24 22:04:04 +11001633 struct thread_info *ti = task_thread_info(p);
1634
1635 klp_init_thread_info(ti);
Paul Mackerras14cf11a2005-09-26 16:04:21 +10001636
Paul Mackerras14cf11a2005-09-26 16:04:21 +10001637 /* Copy registers */
1638 sp -= sizeof(struct pt_regs);
1639 childregs = (struct pt_regs *) sp;
Al Viroab758192012-10-21 22:33:39 -04001640 if (unlikely(p->flags & PF_KTHREAD)) {
Alex Dowad6eca8932015-03-13 20:14:46 +02001641 /* kernel thread */
Al Viro58254e12012-09-12 18:32:42 -04001642 memset(childregs, 0, sizeof(struct pt_regs));
Paul Mackerras14cf11a2005-09-26 16:04:21 +10001643 childregs->gpr[1] = sp + sizeof(struct pt_regs);
Anton Blanchard7cedd602014-02-04 16:08:51 +11001644 /* function */
1645 if (usp)
1646 childregs->gpr[14] = ppc_function_entry((void *)usp);
Al Viro58254e12012-09-12 18:32:42 -04001647#ifdef CONFIG_PPC64
Al Virob5e2fc12006-01-12 01:06:01 -08001648 clear_tsk_thread_flag(p, TIF_32BIT);
Madhavan Srinivasanc2e480b2017-12-20 09:25:42 +05301649 childregs->softe = IRQS_ENABLED;
Paul Mackerras06d67d52005-10-10 22:29:05 +10001650#endif
Alex Dowad6eca8932015-03-13 20:14:46 +02001651 childregs->gpr[15] = kthread_arg;
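		/*
		 * ret_from_kernel_thread picks the thread function up
		 * from r14 and its argument from r15 (set above); both
		 * are non-volatile GPRs, so they survive the first
		 * _switch() into the new task.
		 */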
Paul Mackerras14cf11a2005-09-26 16:04:21 +10001652 p->thread.regs = NULL; /* no user register state */
Al Viro138d1ce2012-10-11 08:41:43 -04001653 ti->flags |= _TIF_RESTOREALL;
Al Viro58254e12012-09-12 18:32:42 -04001654 f = ret_from_kernel_thread;
Paul Mackerras14cf11a2005-09-26 16:04:21 +10001655 } else {
Alex Dowad6eca8932015-03-13 20:14:46 +02001656 /* user thread */
Al Viroafa86fc2012-10-22 22:51:14 -04001657 struct pt_regs *regs = current_pt_regs();
Al Viro58254e12012-09-12 18:32:42 -04001658 CHECK_FULL_REGS(regs);
1659 *childregs = *regs;
Al Viroea516b12012-10-21 22:28:43 -04001660 if (usp)
1661 childregs->gpr[1] = usp;
Paul Mackerras14cf11a2005-09-26 16:04:21 +10001662 p->thread.regs = childregs;
Al Viro58254e12012-09-12 18:32:42 -04001663 childregs->gpr[3] = 0; /* Result from fork() */
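		/*
		 * clone() passes the new TLS value in gpr[6]; the user
		 * TLS register is r13 for 64-bit tasks and r2 for
		 * 32-bit tasks, hence the copies below.
		 */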
Paul Mackerras06d67d52005-10-10 22:29:05 +10001664 if (clone_flags & CLONE_SETTLS) {
1665#ifdef CONFIG_PPC64
Denis Kirjanov9904b002010-07-29 22:04:39 +00001666 if (!is_32bit_task())
Paul Mackerras06d67d52005-10-10 22:29:05 +10001667 childregs->gpr[13] = childregs->gpr[6];
1668 else
1669#endif
1670 childregs->gpr[2] = childregs->gpr[6];
1671 }
Al Viro58254e12012-09-12 18:32:42 -04001672
1673 f = ret_from_fork;
Paul Mackerras14cf11a2005-09-26 16:04:21 +10001674 }
Cyril Burd272f662016-02-29 17:53:46 +11001675 childregs->msr &= ~(MSR_FP|MSR_VEC|MSR_VSX);
Paul Mackerras14cf11a2005-09-26 16:04:21 +10001676 sp -= STACK_FRAME_OVERHEAD;
Paul Mackerras14cf11a2005-09-26 16:04:21 +10001677
1678 /*
1679 * The way this works is that at some point in the future
1680 * some task will call _switch to switch to the new task.
1681 * That will pop off the stack frame created below and start
1682 * the new task running at ret_from_fork. The new task will
1683 * do some housekeeping and then return from the fork or clone
1684 * system call, using the stack frame created above.
1685 */
Li Zhongaf945cf2013-05-06 22:44:41 +00001686 ((unsigned long *)sp)[0] = 0;
Paul Mackerras14cf11a2005-09-26 16:04:21 +10001687 sp -= sizeof(struct pt_regs);
1688 kregs = (struct pt_regs *) sp;
1689 sp -= STACK_FRAME_OVERHEAD;
1690 p->thread.ksp = sp;
Benjamin Herrenschmidtcbc95652013-09-24 15:17:21 +10001691#ifdef CONFIG_PPC32
Kumar Gala85218822008-04-28 16:21:22 +10001692 p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
1693 _ALIGN_UP(sizeof(struct thread_info), 16);
Benjamin Herrenschmidtcbc95652013-09-24 15:17:21 +10001694#endif
Oleg Nesterov28d170ab2013-04-21 06:47:59 +00001695#ifdef CONFIG_HAVE_HW_BREAKPOINT
1696 p->thread.ptrace_bps[0] = NULL;
1697#endif
1698
Paul Mackerras18461962013-09-10 20:21:10 +10001699 p->thread.fp_save_area = NULL;
1700#ifdef CONFIG_ALTIVEC
1701 p->thread.vr_save_area = NULL;
1702#endif
1703
Michael Ellermancec15482014-07-10 12:29:21 +10001704 setup_ksp_vsid(p, sp);
Paul Mackerras06d67d52005-10-10 22:29:05 +10001705
Alexey Kardashevskiyefcac652011-03-02 15:18:48 +00001706#ifdef CONFIG_PPC64
1707 if (cpu_has_feature(CPU_FTR_DSCR)) {
Anton Blanchard1021cb22012-09-03 16:49:47 +00001708 p->thread.dscr_inherit = current->thread.dscr_inherit;
Anton Blancharddb1231dc2015-12-09 20:11:47 +11001709 p->thread.dscr = mfspr(SPRN_DSCR);
Alexey Kardashevskiyefcac652011-03-02 15:18:48 +00001710 }
Haren Myneni92779242012-12-06 21:49:56 +00001711 if (cpu_has_feature(CPU_FTR_HAS_PPR))
1712 p->thread.ppr = INIT_PPR;
Sukadev Bhattiproluec233ed2017-11-07 18:23:53 -08001713
1714 p->thread.tidr = 0;
Alexey Kardashevskiyefcac652011-03-02 15:18:48 +00001715#endif
Anton Blanchard7cedd602014-02-04 16:08:51 +11001716 kregs->nip = ppc_function_entry(f);
Paul Mackerras14cf11a2005-09-26 16:04:21 +10001717 return 0;
1718}
1719
1720/*
1721 * Set up a thread for executing a new program
1722 */
Paul Mackerras06d67d52005-10-10 22:29:05 +10001723void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
Paul Mackerras14cf11a2005-09-26 16:04:21 +10001724{
Michael Ellerman90eac722005-10-21 16:01:33 +10001725#ifdef CONFIG_PPC64
1726 unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */
1727#endif
1728
Paul Mackerras06d67d52005-10-10 22:29:05 +10001729 /*
1730 * If we exec out of a kernel thread then thread.regs will not be
1731 * set. Do it now.
1732 */
1733 if (!current->thread.regs) {
Al Viro0cec6fd2006-01-12 01:06:02 -08001734 struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
1735 current->thread.regs = regs - 1;
Paul Mackerras06d67d52005-10-10 22:29:05 +10001736 }
1737
Cyril Bur8e96a872016-06-17 14:58:34 +10001738#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1739 /*
1740 * Clear any transactional state, we're exec()ing. The cause is
1741 * not important as there will never be a recheckpoint so it's not
1742 * user visible.
1743 */
1744 if (MSR_TM_SUSPENDED(mfmsr()))
1745 tm_reclaim_current(0);
1746#endif
1747
Paul Mackerras14cf11a2005-09-26 16:04:21 +10001748 memset(regs->gpr, 0, sizeof(regs->gpr));
1749 regs->ctr = 0;
1750 regs->link = 0;
1751 regs->xer = 0;
1752 regs->ccr = 0;
Paul Mackerras14cf11a2005-09-26 16:04:21 +10001753 regs->gpr[1] = sp;
Paul Mackerras06d67d52005-10-10 22:29:05 +10001754
Roland McGrath474f8192007-09-24 16:52:44 -07001755 /*
1756 * We have just cleared all the nonvolatile GPRs, so make
1757 * FULL_REGS(regs) return true. This is necessary to allow
1758 * ptrace to examine the thread immediately after exec.
1759 */
1760 regs->trap &= ~1UL;
1761
Paul Mackerras06d67d52005-10-10 22:29:05 +10001762#ifdef CONFIG_PPC32
1763 regs->mq = 0;
1764 regs->nip = start;
Paul Mackerras14cf11a2005-09-26 16:04:21 +10001765 regs->msr = MSR_USER;
Paul Mackerras06d67d52005-10-10 22:29:05 +10001766#else
Denis Kirjanov9904b002010-07-29 22:04:39 +00001767 if (!is_32bit_task()) {
Rusty Russell94af3ab2013-11-20 22:15:02 +11001768 unsigned long entry;
Paul Mackerras06d67d52005-10-10 22:29:05 +10001769
Rusty Russell94af3ab2013-11-20 22:15:02 +11001770 if (is_elf2_task()) {
1771 /* Look ma, no function descriptors! */
1772 entry = start;
Paul Mackerras06d67d52005-10-10 22:29:05 +10001773
Rusty Russell94af3ab2013-11-20 22:15:02 +11001774 /*
1775 * Ulrich says:
1776 * The latest iteration of the ABI requires that when
1777 * calling a function (at its global entry point),
1778 * the caller must ensure r12 holds the entry point
1779 * address (so that the function can quickly
1780 * establish addressability).
1781 */
1782 regs->gpr[12] = start;
1783 /* Make sure that's restored on entry to userspace. */
1784 set_thread_flag(TIF_RESTOREALL);
1785 } else {
1786 unsigned long toc;
1787
1788 /* start is a relocated pointer to the function
1789 * descriptor for the elf _start routine. The first
1790 * entry in the function descriptor is the entry
1791 * address of _start and the second entry is the TOC
1792 * value we need to use.
1793 */
1794 __get_user(entry, (unsigned long __user *)start);
1795 __get_user(toc, (unsigned long __user *)start+1);
1796
1797 /* Check whether the e_entry function descriptor entries
1798 * need to be relocated before we can use them.
1799 */
1800 if (load_addr != 0) {
1801 entry += load_addr;
1802 toc += load_addr;
1803 }
1804 regs->gpr[2] = toc;
Paul Mackerras06d67d52005-10-10 22:29:05 +10001805 }
1806 regs->nip = entry;
Paul Mackerras06d67d52005-10-10 22:29:05 +10001807 regs->msr = MSR_USER64;
Stephen Rothwelld4bf9a72005-10-13 13:40:54 +10001808 } else {
1809 regs->nip = start;
1810 regs->gpr[2] = 0;
1811 regs->msr = MSR_USER32;
Paul Mackerras06d67d52005-10-10 22:29:05 +10001812 }
1813#endif
Michael Neulingce48b212008-06-25 14:07:18 +10001814#ifdef CONFIG_VSX
1815 current->thread.used_vsr = 0;
1816#endif
Breno Leitao11958922017-06-02 18:43:30 -03001817 current->thread.load_fp = 0;
Paul Mackerrasde79f7b2013-09-10 20:20:42 +10001818 memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
Paul Mackerras18461962013-09-10 20:21:10 +10001819 current->thread.fp_save_area = NULL;
Paul Mackerras14cf11a2005-09-26 16:04:21 +10001820#ifdef CONFIG_ALTIVEC
Paul Mackerrasde79f7b2013-09-10 20:20:42 +10001821 memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state));
1822 current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */
Paul Mackerras18461962013-09-10 20:21:10 +10001823 current->thread.vr_save_area = NULL;
Paul Mackerras14cf11a2005-09-26 16:04:21 +10001824 current->thread.vrsave = 0;
1825 current->thread.used_vr = 0;
Breno Leitao11958922017-06-02 18:43:30 -03001826 current->thread.load_vec = 0;
Paul Mackerras14cf11a2005-09-26 16:04:21 +10001827#endif /* CONFIG_ALTIVEC */
1828#ifdef CONFIG_SPE
1829 memset(current->thread.evr, 0, sizeof(current->thread.evr));
1830 current->thread.acc = 0;
1831 current->thread.spefscr = 0;
1832 current->thread.used_spe = 0;
1833#endif /* CONFIG_SPE */
Michael Neulingbc2a9402013-02-13 16:21:40 +00001834#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
Michael Neulingbc2a9402013-02-13 16:21:40 +00001835 current->thread.tm_tfhar = 0;
1836 current->thread.tm_texasr = 0;
1837 current->thread.tm_tfiar = 0;
Breno Leitao7f22ced2017-06-05 11:40:59 -03001838 current->thread.load_tm = 0;
Michael Neulingbc2a9402013-02-13 16:21:40 +00001839#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
Ram Pai06bb53b2018-01-18 17:50:31 -08001840
1841 thread_pkey_regs_init(&current->thread);
Paul Mackerras14cf11a2005-09-26 16:04:21 +10001842}
Anton Blancharde1802b02014-08-20 08:00:02 +10001843EXPORT_SYMBOL(start_thread);
Paul Mackerras14cf11a2005-09-26 16:04:21 +10001844
1845#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
1846 | PR_FP_EXC_RES | PR_FP_EXC_INV)
1847
1848int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
1849{
1850 struct pt_regs *regs = tsk->thread.regs;
1851
1852 /* This is a bit hairy. If we are an SPE enabled processor
1853 * (have embedded fp) we store the IEEE exception enable flags in
1854 * fpexc_mode. fpexc_mode is also used for setting FP exception
1855 * mode (async, precise, disabled) for 'Classic' FP. */
1856 if (val & PR_FP_EXC_SW_ENABLE) {
1857#ifdef CONFIG_SPE
Kumar Gala5e14d212007-09-13 01:44:20 -05001858 if (cpu_has_feature(CPU_FTR_SPE)) {
Joseph Myers640e9222013-12-10 23:07:45 +00001859 /*
1860 * When the sticky exception bits are set
1861 * directly by userspace, it must call prctl
1862 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
1863 * in the existing prctl settings) or
1864 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
1865 * the bits being set). <fenv.h> functions
1866 * saving and restoring the whole
1867 * floating-point environment need to do so
1868 * anyway to restore the prctl settings from
1869 * the saved environment.
1870 */
1871 tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
Kumar Gala5e14d212007-09-13 01:44:20 -05001872 tsk->thread.fpexc_mode = val &
1873 (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
1874 return 0;
1875 } else {
1876 return -EINVAL;
1877 }
Paul Mackerras14cf11a2005-09-26 16:04:21 +10001878#else
1879 return -EINVAL;
1880#endif
Paul Mackerras14cf11a2005-09-26 16:04:21 +10001881 }
Paul Mackerras06d67d52005-10-10 22:29:05 +10001882
1883 /* On a CONFIG_SPE processor this does not hurt us. The bits that
1884 * __pack_fe01 use do not overlap with bits used for
1885 * PR_FP_EXC_SW_ENABLE. Additionally, the MSR[FE0,FE1] bits
1886 * on CONFIG_SPE implementations are reserved so writing to
1887 * them does not change anything */
1888 if (val > PR_FP_EXC_PRECISE)
1889 return -EINVAL;
1890 tsk->thread.fpexc_mode = __pack_fe01(val);
1891 if (regs != NULL && (regs->msr & MSR_FP) != 0)
1892 regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
1893 | tsk->thread.fpexc_mode;
Paul Mackerras14cf11a2005-09-26 16:04:21 +10001894 return 0;
1895}
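/*
 * set_fpexc_mode() above is reached via prctl(2); a (hypothetical)
 * userspace caller selecting precise FP exception mode would do:
 *
 *	prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);
 *
 * which the generic prctl code routes here through SET_FPEXC().
 */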
1896
1897int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
1898{
1899 unsigned int val;
1900
1901 if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
1902#ifdef CONFIG_SPE
Joseph Myers640e9222013-12-10 23:07:45 +00001903 if (cpu_has_feature(CPU_FTR_SPE)) {
1904 /*
1905 * When the sticky exception bits are set
1906 * directly by userspace, it must call prctl
1907 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
1908 * in the existing prctl settings) or
1909 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
1910 * the bits being set). <fenv.h> functions
1911 * saving and restoring the whole
1912 * floating-point environment need to do so
1913 * anyway to restore the prctl settings from
1914 * the saved environment.
1915 */
1916 tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
Kumar Gala5e14d212007-09-13 01:44:20 -05001917 val = tsk->thread.fpexc_mode;
Joseph Myers640e9222013-12-10 23:07:45 +00001918 } else
Kumar Gala5e14d212007-09-13 01:44:20 -05001919 return -EINVAL;
Paul Mackerras14cf11a2005-09-26 16:04:21 +10001920#else
1921 return -EINVAL;
1922#endif
1923 else
1924 val = __unpack_fe01(tsk->thread.fpexc_mode);
1925 return put_user(val, (unsigned int __user *) adr);
1926}
1927
Paul Mackerrasfab5db92006-06-07 16:14:40 +10001928int set_endian(struct task_struct *tsk, unsigned int val)
1929{
1930 struct pt_regs *regs = tsk->thread.regs;
1931
1932 if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
1933 (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
1934 return -EINVAL;
1935
1936 if (regs == NULL)
1937 return -EINVAL;
1938
1939 if (val == PR_ENDIAN_BIG)
1940 regs->msr &= ~MSR_LE;
1941 else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
1942 regs->msr |= MSR_LE;
1943 else
1944 return -EINVAL;
1945
1946 return 0;
1947}
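/*
 * The endianness set above takes effect when the task next returns to
 * user mode, since the modified regs->msr is restored on kernel exit.
 */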
1948
1949int get_endian(struct task_struct *tsk, unsigned long adr)
1950{
1951 struct pt_regs *regs = tsk->thread.regs;
1952 unsigned int val;
1953
1954 if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
1955 !cpu_has_feature(CPU_FTR_REAL_LE))
1956 return -EINVAL;
1957
1958 if (regs == NULL)
1959 return -EINVAL;
1960
1961 if (regs->msr & MSR_LE) {
1962 if (cpu_has_feature(CPU_FTR_REAL_LE))
1963 val = PR_ENDIAN_LITTLE;
1964 else
1965 val = PR_ENDIAN_PPC_LITTLE;
1966 } else
1967 val = PR_ENDIAN_BIG;
1968
1969 return put_user(val, (unsigned int __user *)adr);
1970}
1971
Paul Mackerrase9370ae2006-06-07 16:15:39 +10001972int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
1973{
1974 tsk->thread.align_ctl = val;
1975 return 0;
1976}
1977
1978int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
1979{
1980 return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
1981}
1982
Paul Mackerrasbb72c482007-02-19 11:42:42 +11001983static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
1984 unsigned long nbytes)
1985{
1986 unsigned long stack_page;
1987 unsigned long cpu = task_cpu(p);
1988
1989 /*
1990 * Avoid crashing if the stack has overflowed and corrupted
1991 * task_cpu(p), which is in the thread_info struct.
1992 */
1993 if (cpu < NR_CPUS && cpu_possible(cpu)) {
1994 stack_page = (unsigned long) hardirq_ctx[cpu];
1995 if (sp >= stack_page + sizeof(struct thread_struct)
1996 && sp <= stack_page + THREAD_SIZE - nbytes)
1997 return 1;
1998
1999 stack_page = (unsigned long) softirq_ctx[cpu];
2000 if (sp >= stack_page + sizeof(struct thread_struct)
2001 && sp <= stack_page + THREAD_SIZE - nbytes)
2002 return 1;
2003 }
2004 return 0;
2005}
2006
Anton Blanchard2f251942006-03-27 11:46:18 +11002007int validate_sp(unsigned long sp, struct task_struct *p,
Paul Mackerras14cf11a2005-09-26 16:04:21 +10002008 unsigned long nbytes)
2009{
Al Viro0cec6fd2006-01-12 01:06:02 -08002010 unsigned long stack_page = (unsigned long)task_stack_page(p);
Paul Mackerras14cf11a2005-09-26 16:04:21 +10002011
2012 if (sp >= stack_page + sizeof(struct thread_struct)
2013 && sp <= stack_page + THREAD_SIZE - nbytes)
2014 return 1;
2015
Paul Mackerrasbb72c482007-02-19 11:42:42 +11002016 return valid_irq_stack(sp, p, nbytes);
Paul Mackerras14cf11a2005-09-26 16:04:21 +10002017}
2018
Anton Blanchard2f251942006-03-27 11:46:18 +11002019EXPORT_SYMBOL(validate_sp);
2020
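/*
 * Walk a sleeping task's kernel stack and return the first saved LR
 * found outside the scheduler, i.e. roughly where the task is blocked
 * (reported via /proc/<pid>/wchan).
 */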
Paul Mackerras06d67d52005-10-10 22:29:05 +10002021unsigned long get_wchan(struct task_struct *p)
2022{
2023 unsigned long ip, sp;
2024 int count = 0;
2025
2026 if (!p || p == current || p->state == TASK_RUNNING)
2027 return 0;
2028
2029 sp = p->thread.ksp;
Benjamin Herrenschmidtec2b36b2008-04-17 14:34:59 +10002030 if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
Paul Mackerras06d67d52005-10-10 22:29:05 +10002031 return 0;
2032
2033 do {
2034 sp = *(unsigned long *)sp;
Kautuk Consul4ca360f2016-04-19 15:48:21 +05302035 if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD) ||
2036 p->state == TASK_RUNNING)
Paul Mackerras06d67d52005-10-10 22:29:05 +10002037 return 0;
2038 if (count > 0) {
Benjamin Herrenschmidtec2b36b2008-04-17 14:34:59 +10002039 ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
Paul Mackerras06d67d52005-10-10 22:29:05 +10002040 if (!in_sched_functions(ip))
2041 return ip;
2042 }
2043 } while (count++ < 16);
2044 return 0;
2045}
Paul Mackerras06d67d52005-10-10 22:29:05 +10002046
Johannes Bergc4d04be2008-11-20 03:24:07 +00002047static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
Paul Mackerras14cf11a2005-09-26 16:04:21 +10002048
2049void show_stack(struct task_struct *tsk, unsigned long *stack)
2050{
Paul Mackerras06d67d52005-10-10 22:29:05 +10002051 unsigned long sp, ip, lr, newsp;
Paul Mackerras14cf11a2005-09-26 16:04:21 +10002052 int count = 0;
Paul Mackerras06d67d52005-10-10 22:29:05 +10002053 int firstframe = 1;
Steven Rostedt6794c782009-02-09 21:10:27 -08002054#ifdef CONFIG_FUNCTION_GRAPH_TRACER
2055 int curr_frame = current->curr_ret_stack;
2056 extern void return_to_handler(void);
Steven Rostedt9135c3c2009-09-15 08:20:15 -07002057 unsigned long rth = (unsigned long)return_to_handler;
Steven Rostedt6794c782009-02-09 21:10:27 -08002058#endif
Paul Mackerras14cf11a2005-09-26 16:04:21 +10002059
2060 sp = (unsigned long) stack;
2061 if (tsk == NULL)
2062 tsk = current;
2063 if (sp == 0) {
2064 if (tsk == current)
Anton Blanchardacf620e2014-10-13 19:41:39 +11002065 sp = current_stack_pointer();
Paul Mackerras14cf11a2005-09-26 16:04:21 +10002066 else
2067 sp = tsk->thread.ksp;
2068 }
2069
Paul Mackerras06d67d52005-10-10 22:29:05 +10002070 lr = 0;
2071 printk("Call Trace:\n");
Paul Mackerras14cf11a2005-09-26 16:04:21 +10002072 do {
Benjamin Herrenschmidtec2b36b2008-04-17 14:34:59 +10002073 if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
Paul Mackerras06d67d52005-10-10 22:29:05 +10002074 return;
2075
2076 stack = (unsigned long *) sp;
2077 newsp = stack[0];
Benjamin Herrenschmidtec2b36b2008-04-17 14:34:59 +10002078 ip = stack[STACK_FRAME_LR_SAVE];
Paul Mackerras06d67d52005-10-10 22:29:05 +10002079 if (!firstframe || ip != lr) {
Benjamin Herrenschmidt058c78f2008-07-07 13:44:31 +10002080 printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
Steven Rostedt6794c782009-02-09 21:10:27 -08002081#ifdef CONFIG_FUNCTION_GRAPH_TRACER
Anton Blanchard7d56c652014-09-17 17:07:03 +10002082 if ((ip == rth) && curr_frame >= 0) {
Michael Ellerman9a1f4902016-11-02 22:20:46 +11002083 pr_cont(" (%pS)",
Steven Rostedt6794c782009-02-09 21:10:27 -08002084 (void *)current->ret_stack[curr_frame].ret);
2085 curr_frame--;
2086 }
2087#endif
Paul Mackerras06d67d52005-10-10 22:29:05 +10002088 if (firstframe)
Michael Ellerman9a1f4902016-11-02 22:20:46 +11002089 pr_cont(" (unreliable)");
2090 pr_cont("\n");
Paul Mackerras14cf11a2005-09-26 16:04:21 +10002091 }
Paul Mackerras06d67d52005-10-10 22:29:05 +10002092 firstframe = 0;
2093
2094 /*
2095 * See if this is an exception frame.
2096 * We look for the "regshere" marker in the current frame.
2097 */
Benjamin Herrenschmidtec2b36b2008-04-17 14:34:59 +10002098 if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
2099 && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
Paul Mackerras06d67d52005-10-10 22:29:05 +10002100 struct pt_regs *regs = (struct pt_regs *)
2101 (sp + STACK_FRAME_OVERHEAD);
Paul Mackerras06d67d52005-10-10 22:29:05 +10002102 lr = regs->link;
Paul Mackerras9be9be22014-06-12 16:53:08 +10002103 printk("--- interrupt: %lx at %pS\n LR = %pS\n",
Benjamin Herrenschmidt058c78f2008-07-07 13:44:31 +10002104 regs->trap, (void *)regs->nip, (void *)lr);
Paul Mackerras06d67d52005-10-10 22:29:05 +10002105 firstframe = 1;
2106 }
2107
2108 sp = newsp;
2109 } while (count++ < kstack_depth_to_print);
Paul Mackerras14cf11a2005-09-26 16:04:21 +10002110}
Paul Mackerras06d67d52005-10-10 22:29:05 +10002111
Anton Blanchardcb2c9b22006-02-13 14:48:35 +11002112#ifdef CONFIG_PPC64
Benjamin Herrenschmidtfe1952f2012-03-01 12:45:27 +11002113/* Called with hard IRQs off */
Michael Ellerman0e377392013-06-13 21:04:56 +10002114void notrace __ppc64_runlatch_on(void)
Anton Blanchardcb2c9b22006-02-13 14:48:35 +11002115{
Benjamin Herrenschmidtfe1952f2012-03-01 12:45:27 +11002116 struct thread_info *ti = current_thread_info();
Anton Blanchardcb2c9b22006-02-13 14:48:35 +11002117
Nicholas Piggind1d0d5f2017-08-12 02:39:07 +10002118 if (cpu_has_feature(CPU_FTR_ARCH_206)) {
2119 /*
2120 * Least significant bit (RUN) is the only writable bit of
2121 * the CTRL register, so we can avoid mfspr. 2.06 is not the
2122 * earliest ISA where this is the case, but it's convenient.
2123 */
2124 mtspr(SPRN_CTRLT, CTRL_RUNLATCH);
2125 } else {
2126 unsigned long ctrl;
2127
2128 /*
2129 * Some architectures (e.g., Cell) have writable fields other
2130 * than RUN, so do the read-modify-write.
2131 */
2132 ctrl = mfspr(SPRN_CTRLF);
2133 ctrl |= CTRL_RUNLATCH;
2134 mtspr(SPRN_CTRLT, ctrl);
2135 }
Anton Blanchardcb2c9b22006-02-13 14:48:35 +11002136
Benjamin Herrenschmidtfae2e0f2012-04-11 10:42:15 +10002137 ti->local_flags |= _TLF_RUNLATCH;
Anton Blanchardcb2c9b22006-02-13 14:48:35 +11002138}
2139
Benjamin Herrenschmidtfe1952f2012-03-01 12:45:27 +11002140/* Called with hard IRQs off */
Michael Ellerman0e377392013-06-13 21:04:56 +10002141void notrace __ppc64_runlatch_off(void)
Anton Blanchardcb2c9b22006-02-13 14:48:35 +11002142{
Benjamin Herrenschmidtfe1952f2012-03-01 12:45:27 +11002143 struct thread_info *ti = current_thread_info();
Anton Blanchardcb2c9b22006-02-13 14:48:35 +11002144
Benjamin Herrenschmidtfae2e0f2012-04-11 10:42:15 +10002145 ti->local_flags &= ~_TLF_RUNLATCH;
Anton Blanchardcb2c9b22006-02-13 14:48:35 +11002146
Nicholas Piggind1d0d5f2017-08-12 02:39:07 +10002147 if (cpu_has_feature(CPU_FTR_ARCH_206)) {
2148 mtspr(SPRN_CTRLT, 0);
2149 } else {
2150 unsigned long ctrl;
2151
2152 ctrl = mfspr(SPRN_CTRLF);
2153 ctrl &= ~CTRL_RUNLATCH;
2154 mtspr(SPRN_CTRLT, ctrl);
2155 }
Anton Blanchardcb2c9b22006-02-13 14:48:35 +11002156}
Benjamin Herrenschmidtfe1952f2012-03-01 12:45:27 +11002157#endif /* CONFIG_PPC64 */
Benjamin Herrenschmidtf6a61682008-04-18 16:56:17 +10002158
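/*
 * Randomise the initial stack pointer within one page (when stack
 * randomisation is enabled) and keep it 16-byte aligned, as the ABI
 * requires.
 */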
Anton Blanchardd8390882009-02-22 01:50:03 +00002159unsigned long arch_align_stack(unsigned long sp)
2160{
2161 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2162 sp -= get_random_int() & ~PAGE_MASK;
2163 return sp & ~0xf;
2164}
Anton Blanchard912f9ee2009-02-22 01:50:04 +00002165
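/*
 * Pick a page-aligned random offset for the heap start: uniform in
 * [0, 8MB) for 32-bit tasks and [0, 1GB) for 64-bit tasks.
 */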
2166static inline unsigned long brk_rnd(void)
2167{
2168 unsigned long rnd = 0;
2169
2170 /* 8MB for 32bit, 1GB for 64bit */
2171 if (is_32bit_task())
Daniel Cashman5ef11c32016-02-26 15:19:37 -08002172 rnd = (get_random_long() % (1UL<<(23-PAGE_SHIFT)));
Anton Blanchard912f9ee2009-02-22 01:50:04 +00002173 else
Daniel Cashman5ef11c32016-02-26 15:19:37 -08002174 rnd = (get_random_long() % (1UL<<(30-PAGE_SHIFT)));
Anton Blanchard912f9ee2009-02-22 01:50:04 +00002175
2176 return rnd << PAGE_SHIFT;
2177}
2178
2179unsigned long arch_randomize_brk(struct mm_struct *mm)
2180{
Anton Blanchard8bbde7a2009-09-21 16:52:35 +00002181 unsigned long base = mm->brk;
2182 unsigned long ret;
2183
Michael Ellerman4e003742017-10-19 15:08:43 +11002184#ifdef CONFIG_PPC_BOOK3S_64
Anton Blanchard8bbde7a2009-09-21 16:52:35 +00002185 /*
2186 * If we are using 1TB segments and we are allowed to randomise
2187 * the heap, we can put it above 1TB so it is backed by a 1TB
2188 * segment. Otherwise the heap will be in the bottom 1TB
2189 * which always uses 256MB segments and this may result in a
Aneesh Kumar K.Vcaca2852016-04-29 23:26:07 +10002190 * performance penalty. We don't need to worry about radix. For
2191 * radix, mmu_highuser_ssize remains unchanged from 256MB.
Anton Blanchard8bbde7a2009-09-21 16:52:35 +00002192 */
2193 if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2194 base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2195#endif
2196
2197 ret = PAGE_ALIGN(base + brk_rnd());
Anton Blanchard912f9ee2009-02-22 01:50:04 +00002198
2199 if (ret < mm->brk)
2200 return mm->brk;
2201
2202 return ret;
2203}
Anton Blanchard501cb162009-02-22 01:50:07 +00002204