blob: 7388f1374d5ffbed7a23373b3fb15206ee2deace [file] [log] [blame]
Thomas Gleixner2874c5f2019-05-27 08:55:01 +02001// SPDX-License-Identifier: GPL-2.0-or-later
Ralf Baechle49f2ec92013-05-21 10:53:37 +02002/*
3 * MIPS idle loop and WAIT instruction support.
4 *
5 * Copyright (C) xxxx the Anonymous
6 * Copyright (C) 1994 - 2006 Ralf Baechle
7 * Copyright (C) 2003, 2004 Maciej W. Rozycki
8 * Copyright (C) 2001, 2004, 2011, 2012 MIPS Technologies, Inc.
Ralf Baechle49f2ec92013-05-21 10:53:37 +02009 */
Paul Burton91955e32017-08-23 11:17:47 -070010#include <linux/cpu.h>
Ralf Baechle49f2ec92013-05-21 10:53:37 +020011#include <linux/export.h>
12#include <linux/init.h>
13#include <linux/irqflags.h>
14#include <linux/printk.h>
15#include <linux/sched.h>
16#include <asm/cpu.h>
17#include <asm/cpu-info.h>
Ralf Baechle69f24d12013-09-17 10:25:47 +020018#include <asm/cpu-type.h>
Ralf Baechlebdc92d742013-05-21 16:59:19 +020019#include <asm/idle.h>
Ralf Baechle49f2ec92013-05-21 10:53:37 +020020#include <asm/mipsregs.h>
21
/*
 * Not all of the MIPS CPUs have the "wait" instruction available. Moreover,
 * the implementation of the "wait" feature differs between CPU families. This
 * points to the function that implements CPU specific wait.
 * The wait instruction stops the pipeline and reduces the power consumption of
 * the CPU very much.
 *
 * NULL means no usable wait implementation was selected (see check_wait());
 * arch_cpu_idle() then falls back to simply re-enabling interrupts.
 * Exported so platform/driver code can check for or invoke the idle hook.
 */
void (*cpu_wait)(void);
EXPORT_SYMBOL(cpu_wait);
31
/*
 * R3081 idle: set the HALT bit in the CP0 config register, which
 * (per the R30xx family) stops the pipeline until an interrupt arrives,
 * then enable interrupts so the wakeup interrupt can be taken.
 * Called from arch_cpu_idle() with interrupts disabled.
 */
static void __cpuidle r3081_wait(void)
{
	unsigned long cfg = read_c0_conf();
	write_c0_conf(cfg | R30XX_CONF_HALT);
	local_irq_enable();
}
38
/*
 * TX39xx idle: request a pipeline halt via the CP0 config HALT bit,
 * but only when no reschedule is pending, then re-enable interrupts.
 * The need_resched() test narrows (but cannot fully close) the window
 * in which a wakeup could be missed before the halt takes effect.
 */
static void __cpuidle r39xx_wait(void)
{
	if (!need_resched())
		write_c0_conf(read_c0_conf() | TX39_CONF_HALT);
	local_irq_enable();
}
45
/*
 * Generic R4000-class idle: enable interrupts first, then execute the
 * assembly WAIT sequence __r4k_wait() (from <asm/idle.h>).  Interrupts
 * are enabled before WAIT because, as noted below for r4k_wait_irqoff(),
 * it is implementation-dependent whether WAIT resumes on a masked
 * interrupt on these cores.
 */
void __cpuidle r4k_wait(void)
{
	local_irq_enable();
	__r4k_wait();
}
51
Ralf Baechle49f2ec92013-05-21 10:53:37 +020052/*
53 * This variant is preferable as it allows testing need_resched and going to
54 * sleep depending on the outcome atomically. Unfortunately the "It is
55 * implementation-dependent whether the pipeline restarts when a non-enabled
56 * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes
57 * using this version a gamble.
58 */
Paul Burton97c85802018-06-22 10:55:47 -070059void __cpuidle r4k_wait_irqoff(void)
Ralf Baechle49f2ec92013-05-21 10:53:37 +020060{
Ralf Baechle49f2ec92013-05-21 10:53:37 +020061 if (!need_resched())
Ralf Baechlef91a1482013-05-21 12:58:08 +020062 __asm__(
63 " .set push \n"
Ralf Baechlea809d462014-03-30 13:20:10 +020064 " .set arch=r4000 \n"
Ralf Baechlef91a1482013-05-21 12:58:08 +020065 " wait \n"
66 " .set pop \n");
Ralf Baechle49f2ec92013-05-21 10:53:37 +020067 local_irq_enable();
Ralf Baechle49f2ec92013-05-21 10:53:37 +020068}
69
/*
 * The RM7000 variant has to handle erratum 38. The workaround is to not
 * have any pending stores when the WAIT instruction is executed.
 *
 * The mfc0/sync/mtc0 dance on CP0 Status ($12) below drains the write
 * buffer: the mtc0 stalls until the W pipeline stage, guaranteeing no
 * store is in flight when WAIT executes.  Do not reorder this sequence.
 */
static void __cpuidle rm7k_wait_irqoff(void)
{
	if (!need_resched())
		__asm__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"	.set	noat					\n"
		"	mfc0	$1, $12					\n"
		"	sync						\n"
		"	mtc0	$1, $12		# stalls until W stage	\n"
		"	wait						\n"
		"	mtc0	$1, $12		# stalls until W stage	\n"
		"	.set	pop					\n");
	local_irq_enable();
}
89
/*
 * Au1 'wait' is only useful when the 32kHz counter is used as timer,
 * since coreclock (and the cp0 counter) stops upon executing it. Only an
 * interrupt can wake it, so they must be enabled before entering idle modes.
 *
 * Interrupts are enabled by writing c0status (with IE set) via mtc0 inside
 * the asm block, immediately before WAIT.  The two "cache 0x14" ops target
 * this function's own address (%0 = au1k_wait) — presumably priming the
 * I-cache with the wait sequence before the core clock stops; the trailing
 * nops pad the resume path.  NOTE(review): cache-op semantics assumed from
 * the operand usage — confirm against the Au1 core manual.
 */
static void __cpuidle au1k_wait(void)
{
	unsigned long c0status = read_c0_status() | 1;	/* irqs on */

	__asm__(
	"	.set	push			\n"
	"	.set	arch=r4000		\n"
	"	cache	0x14, 0(%0)		\n"
	"	cache	0x14, 32(%0)		\n"
	"	sync				\n"
	"	mtc0	%1, $12			\n" /* wr c0status */
	"	wait				\n"
	"	nop				\n"
	"	nop				\n"
	"	nop				\n"
	"	nop				\n"
	"	.set	pop			\n"
	: : "r" (au1k_wait), "r" (c0status));
}
114
/* Set by the "nowait" kernel parameter; makes check_wait() a no-op. */
static int __initdata nowait;

/*
 * "nowait" early-parameter handler.  The argument string is unused;
 * returning 1 tells the __setup machinery the option was consumed.
 */
static int __init wait_disable(char *s)
{
	nowait = 1;

	return 1;
}

__setup("nowait", wait_disable);
125
126void __init check_wait(void)
127{
128 struct cpuinfo_mips *c = &current_cpu_data;
129
130 if (nowait) {
131 printk("Wait instruction disabled.\n");
132 return;
133 }
134
Paul Burton5b10a0e2015-09-22 11:24:20 -0700135 /*
136 * MIPSr6 specifies that masked interrupts should unblock an executing
137 * wait instruction, and thus that it is safe for us to use
138 * r4k_wait_irqoff. Yippee!
139 */
140 if (cpu_has_mips_r6) {
141 cpu_wait = r4k_wait_irqoff;
142 return;
143 }
144
Ralf Baechle69f24d12013-09-17 10:25:47 +0200145 switch (current_cpu_type()) {
Ralf Baechle49f2ec92013-05-21 10:53:37 +0200146 case CPU_R3081:
147 case CPU_R3081E:
148 cpu_wait = r3081_wait;
149 break;
150 case CPU_TX3927:
151 cpu_wait = r39xx_wait;
152 break;
153 case CPU_R4200:
154/* case CPU_R4300: */
155 case CPU_R4600:
156 case CPU_R4640:
157 case CPU_R4650:
158 case CPU_R4700:
159 case CPU_R5000:
160 case CPU_R5500:
161 case CPU_NEVADA:
162 case CPU_4KC:
163 case CPU_4KEC:
164 case CPU_4KSC:
165 case CPU_5KC:
Aurelien Jarnobf463f22015-09-05 18:47:31 +0200166 case CPU_5KE:
Ralf Baechle49f2ec92013-05-21 10:53:37 +0200167 case CPU_25KF:
168 case CPU_PR4450:
169 case CPU_BMIPS3300:
170 case CPU_BMIPS4350:
171 case CPU_BMIPS4380:
Ralf Baechle49f2ec92013-05-21 10:53:37 +0200172 case CPU_CAVIUM_OCTEON:
173 case CPU_CAVIUM_OCTEON_PLUS:
174 case CPU_CAVIUM_OCTEON2:
David Daney4122af02013-07-29 15:07:02 -0700175 case CPU_CAVIUM_OCTEON3:
Ralf Baechle49f2ec92013-05-21 10:53:37 +0200176 case CPU_JZRISC:
177 case CPU_LOONGSON1:
178 case CPU_XLR:
179 case CPU_XLP:
180 cpu_wait = r4k_wait;
181 break;
Huacai Chenb2edcfc2016-03-03 09:45:09 +0800182 case CPU_LOONGSON3:
Huacai Chenf3ade252018-11-15 15:53:52 +0800183 if ((c->processor_id & PRID_REV_MASK) >= PRID_REV_LOONGSON3A_R2_0)
Huacai Chenb2edcfc2016-03-03 09:45:09 +0800184 cpu_wait = r4k_wait;
185 break;
186
Petri Gyntheradaa0b62015-10-19 11:44:24 -0700187 case CPU_BMIPS5000:
188 cpu_wait = r4k_wait_irqoff;
189 break;
Ralf Baechle49f2ec92013-05-21 10:53:37 +0200190 case CPU_RM7000:
191 cpu_wait = rm7k_wait_irqoff;
192 break;
193
James Hogane38df282015-01-29 11:14:11 +0000194 case CPU_PROAPTIV:
195 case CPU_P5600:
196 /*
197 * Incoming Fast Debug Channel (FDC) data during a wait
198 * instruction causes the wait never to resume, even if an
199 * interrupt is received. Avoid using wait at all if FDC data is
200 * likely to be received.
201 */
202 if (IS_ENABLED(CONFIG_MIPS_EJTAG_FDC_TTY))
203 break;
204 /* fall through */
Ralf Baechle49f2ec92013-05-21 10:53:37 +0200205 case CPU_M14KC:
206 case CPU_M14KEC:
207 case CPU_24K:
208 case CPU_34K:
209 case CPU_1004K:
Steven J. Hill442e14a2014-01-17 15:03:50 -0600210 case CPU_1074K:
Leonid Yegoshin26ab96d2013-11-27 10:07:53 +0000211 case CPU_INTERAPTIV:
Leonid Yegoshinf36c4722014-03-04 13:34:43 +0000212 case CPU_M5150:
Leonid Yegoshin46950892014-11-24 12:59:01 +0000213 case CPU_QEMU_GENERIC:
Ralf Baechle49f2ec92013-05-21 10:53:37 +0200214 cpu_wait = r4k_wait;
215 if (read_c0_config7() & MIPS_CONF7_WII)
216 cpu_wait = r4k_wait_irqoff;
217 break;
218
219 case CPU_74K:
220 cpu_wait = r4k_wait;
221 if ((c->processor_id & 0xff) >= PRID_REV_ENCODE_332(2, 1, 0))
222 cpu_wait = r4k_wait_irqoff;
223 break;
224
225 case CPU_TX49XX:
226 cpu_wait = r4k_wait_irqoff;
227 break;
228 case CPU_ALCHEMY:
229 cpu_wait = au1k_wait;
230 break;
231 case CPU_20KC:
232 /*
233 * WAIT on Rev1.0 has E1, E2, E3 and E16.
234 * WAIT on Rev2.0 and Rev3.0 has E16.
235 * Rev3.1 WAIT is nop, why bother
236 */
237 if ((c->processor_id & 0xff) <= 0x64)
238 break;
239
240 /*
241 * Another rev is incremeting c0_count at a reduced clock
242 * rate while in WAIT mode. So we basically have the choice
243 * between using the cp0 timer as clocksource or avoiding
244 * the WAIT instruction. Until more details are known,
245 * disable the use of WAIT for 20Kc entirely.
246 cpu_wait = r4k_wait;
247 */
248 break;
Ralf Baechle49f2ec92013-05-21 10:53:37 +0200249 default:
250 break;
251 }
252}
253
Ralf Baechle00baf852013-05-21 12:47:26 +0200254void arch_cpu_idle(void)
255{
Ralf Baechle49f2ec92013-05-21 10:53:37 +0200256 if (cpu_wait)
Ralf Baechlec9b68692013-05-21 13:02:12 +0200257 cpu_wait();
Ralf Baechle49f2ec92013-05-21 10:53:37 +0200258 else
259 local_irq_enable();
260}
Paul Burtonda9f9702014-04-14 16:16:41 +0100261
#ifdef CONFIG_CPU_IDLE

/*
 * cpuidle state-enter callback: simply defer to the regular
 * arch_cpu_idle() path and report the entered state index back to the
 * cpuidle core.  The dev/drv arguments are unused here.
 */
int mips_cpuidle_wait_enter(struct cpuidle_device *dev,
			    struct cpuidle_driver *drv, int index)
{
	arch_cpu_idle();
	return index;
}

#endif