// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/common/bL_switcher.c -- big.LITTLE cluster switcher core driver
 *
 * Created by: Nicolas Pitre, March 2012
 * Copyright:  (C) 2012-2013 Linaro Limited
 */

#include <linux/atomic.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/interrupt.h>
#include <linux/cpu_pm.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/time.h>
#include <linux/clockchips.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/notifier.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/moduleparam.h>

#include <asm/smp_plat.h>
#include <asm/cputype.h>
#include <asm/suspend.h>
#include <asm/mcpm.h>
#include <asm/bL_switcher.h>

#define CREATE_TRACE_POINTS
#include <trace/events/power_cpu_migrate.h>


/*
 * Use our own MPIDR accessors as the generic ones in asm/cputype.h have
 * __attribute_const__ and we don't want the compiler to assume any
 * constness here as the value _does_ change along some code paths.
 */

static int read_mpidr(void)
{
	unsigned int id;
	asm volatile ("mrc p15, 0, %0, c0, c0, 5" : "=r" (id));
	return id & MPIDR_HWID_BITMASK;
}
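
/*
 * Throughout this driver, MPIDR values are decoded with affinity level 0
 * giving the CPU number within a cluster and affinity level 1 giving the
 * cluster number, as is conventional on two-level big.LITTLE systems.
 */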

/*
 * bL switcher core code.
 */

static void bL_do_switch(void *_arg)
{
	unsigned ib_mpidr, ib_cpu, ib_cluster;
	long volatile handshake, **handshake_ptr = _arg;

	pr_debug("%s\n", __func__);

	ib_mpidr = cpu_logical_map(smp_processor_id());
	ib_cpu = MPIDR_AFFINITY_LEVEL(ib_mpidr, 0);
	ib_cluster = MPIDR_AFFINITY_LEVEL(ib_mpidr, 1);

	/* Advertise our handshake location */
	if (handshake_ptr) {
		handshake = 0;
		*handshake_ptr = &handshake;
	} else
		handshake = -1;

	/*
	 * Our state has been saved at this point. Let's release our
	 * inbound CPU.
	 */
	mcpm_set_entry_vector(ib_cpu, ib_cluster, cpu_resume);
	sev();

	/*
	 * From this point, we must assume that our counterpart CPU might
	 * have taken over in its parallel world already, as if execution
	 * just returned from cpu_suspend(). It is therefore important to
	 * be very careful not to make any change the other CPU is not
	 * expecting. This is why we need stack isolation.
	 *
	 * Fancy undercover tasks could be performed here. For now
	 * we have none.
	 */

	/*
	 * Let's wait until our inbound is alive.
	 */
	while (!handshake) {
		wfe();
		smp_mb();
	}

	/* Let's put ourselves down. */
	mcpm_cpu_power_down();

	/* should never get here */
	BUG();
}
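
/*
 * Note on the handshake above: the handshake word lives on the outbound
 * CPU's isolated stack area and its address is published through
 * *handshake_ptr. Once the inbound CPU has resumed in bL_switch_to() it
 * writes 1 through that pointer and issues dsb_sev(), which releases the
 * wfe() loop so the outbound CPU can finally power itself down.
 */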

/*
 * Stack isolation. To ensure 'current' remains valid, we just use another
 * piece of our thread's stack space which should be fairly lightly used.
 * The selected area starts just above the thread_info structure located
 * at the very bottom of the stack, aligned to a cache line, and indexed
 * with the cluster number.
 */
#define STACK_SIZE 512
extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
static int bL_switchpoint(unsigned long _arg)
{
	unsigned int mpidr = read_mpidr();
	unsigned int clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	void *stack = current_thread_info() + 1;
	stack = PTR_ALIGN(stack, L1_CACHE_BYTES);
	stack += clusterid * STACK_SIZE + STACK_SIZE;
	call_with_stack(bL_do_switch, (void *)_arg, stack);
	BUG();
}
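
/*
 * In other words, two STACK_SIZE byte windows sit just above thread_info,
 * one per cluster. Since ARM stacks grow downwards, the pointer passed to
 * call_with_stack() is the *top* of the window selected by the cluster ID,
 * hence the extra "+ STACK_SIZE" above.
 */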

/*
 * Generic switcher interface
 */

static unsigned int bL_gic_id[MAX_CPUS_PER_CLUSTER][MAX_NR_CLUSTERS];
static int bL_switcher_cpu_pairing[NR_CPUS];

/*
 * bL_switch_to - Switch to a specific cluster for the current CPU
 * @new_cluster_id: the ID of the cluster to switch to.
 *
 * This function must be called on the CPU to be switched.
 * Returns 0 on success, else a negative status code.
 */
static int bL_switch_to(unsigned int new_cluster_id)
{
	unsigned int mpidr, this_cpu, that_cpu;
	unsigned int ob_mpidr, ob_cpu, ob_cluster, ib_mpidr, ib_cpu, ib_cluster;
	struct completion inbound_alive;
	long volatile *handshake_ptr;
	int ipi_nr, ret;

	this_cpu = smp_processor_id();
	ob_mpidr = read_mpidr();
	ob_cpu = MPIDR_AFFINITY_LEVEL(ob_mpidr, 0);
	ob_cluster = MPIDR_AFFINITY_LEVEL(ob_mpidr, 1);
	BUG_ON(cpu_logical_map(this_cpu) != ob_mpidr);

	if (new_cluster_id == ob_cluster)
		return 0;

	that_cpu = bL_switcher_cpu_pairing[this_cpu];
	ib_mpidr = cpu_logical_map(that_cpu);
	ib_cpu = MPIDR_AFFINITY_LEVEL(ib_mpidr, 0);
	ib_cluster = MPIDR_AFFINITY_LEVEL(ib_mpidr, 1);

	pr_debug("before switch: CPU %d MPIDR %#x -> %#x\n",
		 this_cpu, ob_mpidr, ib_mpidr);

	this_cpu = smp_processor_id();

	/* Close the gate for our entry vectors */
	mcpm_set_entry_vector(ob_cpu, ob_cluster, NULL);
	mcpm_set_entry_vector(ib_cpu, ib_cluster, NULL);

	/* Install our "inbound alive" notifier. */
	init_completion(&inbound_alive);
	ipi_nr = register_ipi_completion(&inbound_alive, this_cpu);
	ipi_nr |= ((1 << 16) << bL_gic_id[ob_cpu][ob_cluster]);
	mcpm_set_early_poke(ib_cpu, ib_cluster, gic_get_sgir_physaddr(), ipi_nr);
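	/*
	 * The value poked above is effectively a GIC GICD_SGIR write: the
	 * low bits carry the completion SGI number and bit (16 + GIC ID)
	 * selects the outbound CPU as the target. The inbound CPU's early
	 * boot path writes it to the SGIR physical address, so our
	 * "inbound alive" completion fires as soon as that CPU is up.
	 */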

	/*
	 * Let's wake up the inbound CPU now in case it requires some delay
	 * to come online, but leave it gated in our entry vector code.
	 */
	ret = mcpm_cpu_power_up(ib_cpu, ib_cluster);
	if (ret) {
		pr_err("%s: mcpm_cpu_power_up() returned %d\n", __func__, ret);
		return ret;
	}

	/*
	 * Raise an SGI on the inbound CPU to make sure it doesn't stall
	 * in a possible WFI, such as in bL_power_down().
	 */
	gic_send_sgi(bL_gic_id[ib_cpu][ib_cluster], 0);

	/*
	 * Wait for the inbound to come up. This allows for other
	 * tasks to be scheduled in the meantime.
	 */
	wait_for_completion(&inbound_alive);
	mcpm_set_early_poke(ib_cpu, ib_cluster, 0, 0);

	/*
	 * From this point we are entering the switch critical zone
	 * and can't take any interrupts anymore.
	 */
	local_irq_disable();
	local_fiq_disable();
	trace_cpu_migrate_begin(ktime_get_real_ns(), ob_mpidr);

	/* redirect GIC's SGIs to our counterpart */
	gic_migrate_target(bL_gic_id[ib_cpu][ib_cluster]);

	tick_suspend_local();

	ret = cpu_pm_enter();

	/* we cannot tolerate errors at this point */
	if (ret)
		panic("%s: cpu_pm_enter() returned %d\n", __func__, ret);

	/* Swap the physical CPUs in the logical map for this logical CPU. */
	cpu_logical_map(this_cpu) = ib_mpidr;
	cpu_logical_map(that_cpu) = ob_mpidr;
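	/*
	 * From now on, logical CPU this_cpu is described by the inbound
	 * MPIDR and the paired logical CPU by the outbound one. The logical
	 * CPU number itself never changes, which is what keeps the switch
	 * transparent to the scheduler and the rest of the kernel.
	 */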
Nicolas Pitre | 1c33be5 | 2012-04-12 02:56:10 -0400 | [diff] [blame] | 229 | |
| 230 | /* Let's do the actual CPU switch. */ |
Nicolas Pitre | 108a964 | 2012-10-23 01:39:08 -0400 | [diff] [blame] | 231 | ret = cpu_suspend((unsigned long)&handshake_ptr, bL_switchpoint); |
Nicolas Pitre | 1c33be5 | 2012-04-12 02:56:10 -0400 | [diff] [blame] | 232 | if (ret > 0) |
| 233 | panic("%s: cpu_suspend() returned %d\n", __func__, ret); |
| 234 | |
| 235 | /* We are executing on the inbound CPU at this point */ |
| 236 | mpidr = read_mpidr(); |
Nicolas Pitre | 38c35d4 | 2013-06-13 23:42:46 -0400 | [diff] [blame] | 237 | pr_debug("after switch: CPU %d MPIDR %#x\n", this_cpu, mpidr); |
| 238 | BUG_ON(mpidr != ib_mpidr); |
Nicolas Pitre | 1c33be5 | 2012-04-12 02:56:10 -0400 | [diff] [blame] | 239 | |
| 240 | mcpm_cpu_powered_up(); |
| 241 | |
| 242 | ret = cpu_pm_exit(); |
| 243 | |
Thomas Gleixner | 7270d11 | 2015-03-25 13:11:52 +0100 | [diff] [blame] | 244 | tick_resume_local(); |
Lorenzo Pieralisi | 3f09d47 | 2012-05-16 15:55:54 +0100 | [diff] [blame] | 245 | |
Thomas Gleixner | 41fa421 | 2014-07-16 21:04:50 +0000 | [diff] [blame] | 246 | trace_cpu_migrate_finish(ktime_get_real_ns(), ib_mpidr); |
Nicolas Pitre | 1c33be5 | 2012-04-12 02:56:10 -0400 | [diff] [blame] | 247 | local_fiq_enable(); |
| 248 | local_irq_enable(); |
| 249 | |
Nicolas Pitre | 108a964 | 2012-10-23 01:39:08 -0400 | [diff] [blame] | 250 | *handshake_ptr = 1; |
| 251 | dsb_sev(); |
| 252 | |
Nicolas Pitre | 1c33be5 | 2012-04-12 02:56:10 -0400 | [diff] [blame] | 253 | if (ret) |
| 254 | pr_err("%s exiting with error %d\n", __func__, ret); |
| 255 | return ret; |
| 256 | } |

struct bL_thread {
	spinlock_t lock;
	struct task_struct *task;
	wait_queue_head_t wq;
	int wanted_cluster;
	struct completion started;
	bL_switch_completion_handler completer;
	void *completer_cookie;
};

static struct bL_thread bL_threads[NR_CPUS];

static int bL_switcher_thread(void *arg)
{
	struct bL_thread *t = arg;
	struct sched_param param = { .sched_priority = 1 };
	int cluster;
	bL_switch_completion_handler completer;
	void *completer_cookie;

	sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
	complete(&t->started);

	do {
		if (signal_pending(current))
			flush_signals(current);
		wait_event_interruptible(t->wq,
				t->wanted_cluster != -1 ||
				kthread_should_stop());

		spin_lock(&t->lock);
		cluster = t->wanted_cluster;
		completer = t->completer;
		completer_cookie = t->completer_cookie;
		t->wanted_cluster = -1;
		t->completer = NULL;
		spin_unlock(&t->lock);

		if (cluster != -1) {
			bL_switch_to(cluster);

			if (completer)
				completer(completer_cookie);
		}
	} while (!kthread_should_stop());

	return 0;
}

static struct task_struct *bL_switcher_thread_create(int cpu, void *arg)
{
	struct task_struct *task;

	task = kthread_create_on_node(bL_switcher_thread, arg,
				      cpu_to_node(cpu), "kswitcher_%d", cpu);
	if (!IS_ERR(task)) {
		kthread_bind(task, cpu);
		wake_up_process(task);
	} else
		pr_err("%s failed for CPU %d\n", __func__, cpu);
	return task;
}

/*
 * bL_switch_request_cb - Switch to a specific cluster for the given CPU,
 *	with completion notification via a callback
 *
 * @cpu: the CPU to switch
 * @new_cluster_id: the ID of the cluster to switch to.
 * @completer: switch completion callback. If non-NULL,
 *	@completer(@completer_cookie) will be called on completion of
 *	the switch, in non-atomic context.
 * @completer_cookie: opaque context argument for @completer.
 *
 * This function causes a cluster switch on the given CPU by waking up
 * the appropriate switcher thread. This function may or may not return
 * before the switch has occurred.
 *
 * If a @completer callback function is supplied, it will be called when
 * the switch is complete. This can be used to determine asynchronously
 * when the switch is complete, regardless of when bL_switch_request()
 * returns. When @completer is supplied, no new switch request is permitted
 * for the affected CPU until after the switch is complete, and @completer
 * has returned.
 */
int bL_switch_request_cb(unsigned int cpu, unsigned int new_cluster_id,
			 bL_switch_completion_handler completer,
			 void *completer_cookie)
{
	struct bL_thread *t;

	if (cpu >= ARRAY_SIZE(bL_threads)) {
		pr_err("%s: cpu %d out of bounds\n", __func__, cpu);
		return -EINVAL;
	}

	t = &bL_threads[cpu];

	if (IS_ERR(t->task))
		return PTR_ERR(t->task);
	if (!t->task)
		return -ESRCH;

	spin_lock(&t->lock);
	if (t->completer) {
		spin_unlock(&t->lock);
		return -EBUSY;
	}
	t->completer = completer;
	t->completer_cookie = completer_cookie;
	t->wanted_cluster = new_cluster_id;
	spin_unlock(&t->lock);
	wake_up(&t->wq);
	return 0;
}
EXPORT_SYMBOL_GPL(bL_switch_request_cb);
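
/*
 * Illustrative usage only (the names below are hypothetical, not part of
 * this API): a caller that must block until the switch has happened can
 * pass a completion-based handler, along the lines of:
 *
 *	static void my_switch_done(void *cookie)
 *	{
 *		complete(cookie);
 *	}
 *	...
 *	DECLARE_COMPLETION_ONSTACK(done);
 *	if (!bL_switch_request_cb(cpu, new_cluster_id, my_switch_done, &done))
 *		wait_for_completion(&done);
 *
 * Callers that don't care about completion can use bL_switch_request()
 * from asm/bL_switcher.h, which passes a NULL completer.
 */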

/*
 * Activation and configuration code.
 */

static DEFINE_MUTEX(bL_switcher_activation_lock);
static BLOCKING_NOTIFIER_HEAD(bL_activation_notifier);
static unsigned int bL_switcher_active;
static unsigned int bL_switcher_cpu_original_cluster[NR_CPUS];
static cpumask_t bL_switcher_removed_logical_cpus;

int bL_switcher_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&bL_activation_notifier, nb);
}
EXPORT_SYMBOL_GPL(bL_switcher_register_notifier);

int bL_switcher_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&bL_activation_notifier, nb);
}
EXPORT_SYMBOL_GPL(bL_switcher_unregister_notifier);

static int bL_activation_notify(unsigned long val)
{
	int ret;

	ret = blocking_notifier_call_chain(&bL_activation_notifier, val, NULL);
	if (ret & NOTIFY_STOP_MASK)
		pr_err("%s: notifier chain failed with status 0x%x\n",
			__func__, ret);
	return notifier_to_errno(ret);
}

static void bL_switcher_restore_cpus(void)
{
	int i;

	for_each_cpu(i, &bL_switcher_removed_logical_cpus) {
		struct device *cpu_dev = get_cpu_device(i);
		int ret = device_online(cpu_dev);
		if (ret)
			dev_err(cpu_dev, "switcher: unable to restore CPU\n");
	}
}

static int bL_switcher_halve_cpus(void)
{
	int i, j, cluster_0, gic_id, ret;
	unsigned int cpu, cluster, mask;
	cpumask_t available_cpus;

	/* First pass to validate what we have */
	mask = 0;
	for_each_online_cpu(i) {
		cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0);
		cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
		if (cluster >= 2) {
			pr_err("%s: only dual cluster systems are supported\n", __func__);
			return -EINVAL;
		}
		if (WARN_ON(cpu >= MAX_CPUS_PER_CLUSTER))
			return -EINVAL;
		mask |= (1 << cluster);
	}
	if (mask != 3) {
		pr_err("%s: no CPU pairing possible\n", __func__);
		return -EINVAL;
	}

	/*
	 * Now let's do the pairing. We match each CPU with another CPU
	 * from a different cluster. To get a uniform scheduling behavior
	 * without fiddling with CPU topology and compute capacity data,
	 * we'll use logical CPUs initially belonging to the same cluster.
	 */
	memset(bL_switcher_cpu_pairing, -1, sizeof(bL_switcher_cpu_pairing));
	cpumask_copy(&available_cpus, cpu_online_mask);
	cluster_0 = -1;
	for_each_cpu(i, &available_cpus) {
		int match = -1;
		cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
		if (cluster_0 == -1)
			cluster_0 = cluster;
		if (cluster != cluster_0)
			continue;
		cpumask_clear_cpu(i, &available_cpus);
		for_each_cpu(j, &available_cpus) {
			cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(j), 1);
			/*
			 * Let's remember the last match to create "odd"
			 * pairings on purpose in order for other code not
			 * to assume any relation between physical and
			 * logical CPU numbers.
			 */
			if (cluster != cluster_0)
				match = j;
		}
		if (match != -1) {
			bL_switcher_cpu_pairing[i] = match;
			cpumask_clear_cpu(match, &available_cpus);
			pr_info("CPU%d paired with CPU%d\n", i, match);
		}
	}
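
	/*
	 * As an illustration (hypothetical 2+2 system): if logical CPUs 0-1
	 * start on cluster 0 and CPUs 2-3 on cluster 1, the "last match"
	 * rule above pairs CPU0 with CPU3 and CPU1 with CPU2.
	 */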

	/*
	 * Now we disable the unwanted CPUs i.e. everything that has no
	 * pairing information (that includes the pairing counterparts).
	 */
	cpumask_clear(&bL_switcher_removed_logical_cpus);
	for_each_online_cpu(i) {
		cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0);
		cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);

		/* Let's take note of the GIC ID for this CPU */
		gic_id = gic_get_cpu_id(i);
		if (gic_id < 0) {
			pr_err("%s: bad GIC ID for CPU %d\n", __func__, i);
			bL_switcher_restore_cpus();
			return -EINVAL;
		}
		bL_gic_id[cpu][cluster] = gic_id;
		pr_info("GIC ID for CPU %u cluster %u is %u\n",
			cpu, cluster, gic_id);

		if (bL_switcher_cpu_pairing[i] != -1) {
			bL_switcher_cpu_original_cluster[i] = cluster;
			continue;
		}

		ret = device_offline(get_cpu_device(i));
		if (ret) {
			bL_switcher_restore_cpus();
			return ret;
		}
		cpumask_set_cpu(i, &bL_switcher_removed_logical_cpus);
	}

	return 0;
}

/* Determine the logical CPU a given physical CPU is grouped on. */
int bL_switcher_get_logical_index(u32 mpidr)
{
	int cpu;

	if (!bL_switcher_active)
		return -EUNATCH;

	mpidr &= MPIDR_HWID_BITMASK;
	for_each_online_cpu(cpu) {
		int pairing = bL_switcher_cpu_pairing[cpu];
		if (pairing == -1)
			continue;
		if ((mpidr == cpu_logical_map(cpu)) ||
		    (mpidr == cpu_logical_map(pairing)))
			return cpu;
	}
	return -EINVAL;
}

static void bL_switcher_trace_trigger_cpu(void *__always_unused info)
{
	trace_cpu_migrate_current(ktime_get_real_ns(), read_mpidr());
}

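/*
 * Emit a cpu_migrate_current event on every online CPU. This gives trace
 * consumers a baseline logical-to-physical CPU mapping; it is triggered
 * whenever the switcher is enabled or disabled, and on demand through the
 * trace_trigger sysfs attribute below.
 */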
int bL_switcher_trace_trigger(void)
{
	int ret;

	preempt_disable();

	bL_switcher_trace_trigger_cpu(NULL);
	ret = smp_call_function(bL_switcher_trace_trigger_cpu, NULL, true);

	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(bL_switcher_trace_trigger);

static int bL_switcher_enable(void)
{
	int cpu, ret;

	mutex_lock(&bL_switcher_activation_lock);
	lock_device_hotplug();
	if (bL_switcher_active) {
		unlock_device_hotplug();
		mutex_unlock(&bL_switcher_activation_lock);
		return 0;
	}

	pr_info("big.LITTLE switcher initializing\n");

	ret = bL_activation_notify(BL_NOTIFY_PRE_ENABLE);
	if (ret)
		goto error;

	ret = bL_switcher_halve_cpus();
	if (ret)
		goto error;

	bL_switcher_trace_trigger();

	for_each_online_cpu(cpu) {
		struct bL_thread *t = &bL_threads[cpu];
		spin_lock_init(&t->lock);
		init_waitqueue_head(&t->wq);
		init_completion(&t->started);
		t->wanted_cluster = -1;
		t->task = bL_switcher_thread_create(cpu, t);
	}

	bL_switcher_active = 1;
	bL_activation_notify(BL_NOTIFY_POST_ENABLE);
	pr_info("big.LITTLE switcher initialized\n");
	goto out;

error:
	pr_warn("big.LITTLE switcher initialization failed\n");
	bL_activation_notify(BL_NOTIFY_POST_DISABLE);

out:
	unlock_device_hotplug();
	mutex_unlock(&bL_switcher_activation_lock);
	return ret;
}

#ifdef CONFIG_SYSFS

static void bL_switcher_disable(void)
{
	unsigned int cpu, cluster;
	struct bL_thread *t;
	struct task_struct *task;

	mutex_lock(&bL_switcher_activation_lock);
	lock_device_hotplug();

	if (!bL_switcher_active)
		goto out;

	if (bL_activation_notify(BL_NOTIFY_PRE_DISABLE) != 0) {
		bL_activation_notify(BL_NOTIFY_POST_ENABLE);
		goto out;
	}

	bL_switcher_active = 0;

	/*
	 * To deactivate the switcher, we must shut down the switcher
	 * threads to prevent any other requests from being accepted.
	 * Then, if the final cluster for a given logical CPU is not the
	 * same as the original one, we'll recreate a switcher thread
	 * just for the purpose of switching the CPU back without any
	 * possibility for interference from external requests.
	 */
	for_each_online_cpu(cpu) {
		t = &bL_threads[cpu];
		task = t->task;
		t->task = NULL;
		if (!task || IS_ERR(task))
			continue;
		kthread_stop(task);
		/* no more switch may happen on this CPU at this point */
		cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
		if (cluster == bL_switcher_cpu_original_cluster[cpu])
			continue;
		init_completion(&t->started);
		t->wanted_cluster = bL_switcher_cpu_original_cluster[cpu];
		task = bL_switcher_thread_create(cpu, t);
		if (!IS_ERR(task)) {
			wait_for_completion(&t->started);
			kthread_stop(task);
			cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
			if (cluster == bL_switcher_cpu_original_cluster[cpu])
				continue;
		}
		/* If execution gets here, we're in trouble. */
		pr_crit("%s: unable to restore original cluster for CPU %d\n",
			__func__, cpu);
		pr_crit("%s: CPU %d can't be restored\n",
			__func__, bL_switcher_cpu_pairing[cpu]);
		cpumask_clear_cpu(bL_switcher_cpu_pairing[cpu],
				  &bL_switcher_removed_logical_cpus);
	}

	bL_switcher_restore_cpus();
	bL_switcher_trace_trigger();

	bL_activation_notify(BL_NOTIFY_POST_DISABLE);

out:
	unlock_device_hotplug();
	mutex_unlock(&bL_switcher_activation_lock);
}

static ssize_t bL_switcher_active_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", bL_switcher_active);
}

static ssize_t bL_switcher_active_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	int ret;

	switch (buf[0]) {
	case '0':
		bL_switcher_disable();
		ret = 0;
		break;
	case '1':
		ret = bL_switcher_enable();
		break;
	default:
		ret = -EINVAL;
	}

	return (ret >= 0) ? count : ret;
}

static ssize_t bL_switcher_trace_trigger_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	int ret = bL_switcher_trace_trigger();

	return ret ? ret : count;
}

static struct kobj_attribute bL_switcher_active_attr =
	__ATTR(active, 0644, bL_switcher_active_show, bL_switcher_active_store);

static struct kobj_attribute bL_switcher_trace_trigger_attr =
	__ATTR(trace_trigger, 0200, NULL, bL_switcher_trace_trigger_store);

static struct attribute *bL_switcher_attrs[] = {
	&bL_switcher_active_attr.attr,
	&bL_switcher_trace_trigger_attr.attr,
	NULL,
};

static struct attribute_group bL_switcher_attr_group = {
	.attrs = bL_switcher_attrs,
};

static struct kobject *bL_switcher_kobj;

static int __init bL_switcher_sysfs_init(void)
{
	int ret;

	bL_switcher_kobj = kobject_create_and_add("bL_switcher", kernel_kobj);
	if (!bL_switcher_kobj)
		return -ENOMEM;
	ret = sysfs_create_group(bL_switcher_kobj, &bL_switcher_attr_group);
	if (ret)
		kobject_put(bL_switcher_kobj);
	return ret;
}

#endif /* CONFIG_SYSFS */

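/*
 * The get/put pair below lets other kernel code sample the switcher state
 * without racing against enable/disable: bL_switcher_get_enabled() returns
 * the current state with the activation mutex held, and the caller must
 * drop it again with bL_switcher_put_enabled() once it no longer cares.
 */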
bool bL_switcher_get_enabled(void)
{
	mutex_lock(&bL_switcher_activation_lock);

	return bL_switcher_active;
}
EXPORT_SYMBOL_GPL(bL_switcher_get_enabled);

void bL_switcher_put_enabled(void)
{
	mutex_unlock(&bL_switcher_activation_lock);
}
EXPORT_SYMBOL_GPL(bL_switcher_put_enabled);

/*
 * Veto any CPU hotplug operation on those CPUs we've removed
 * while the switcher is active.
 * We're just not ready to deal with that given the trickery involved.
 */
static int bL_switcher_cpu_pre(unsigned int cpu)
{
	int pairing;

	if (!bL_switcher_active)
		return 0;

	pairing = bL_switcher_cpu_pairing[cpu];

	if (pairing == -1)
		return -EINVAL;
	return 0;
}

static bool no_bL_switcher;
core_param(no_bL_switcher, no_bL_switcher, bool, 0644);

static int __init bL_switcher_init(void)
{
	int ret;

	if (!mcpm_is_available())
		return -ENODEV;

	cpuhp_setup_state_nocalls(CPUHP_ARM_BL_PREPARE, "arm/bl:prepare",
				  bL_switcher_cpu_pre, NULL);
	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "arm/bl:predown",
					NULL, bL_switcher_cpu_pre);
	if (ret < 0) {
		cpuhp_remove_state_nocalls(CPUHP_ARM_BL_PREPARE);
		pr_err("bL_switcher: Failed to allocate a hotplug state\n");
		return ret;
	}
	if (!no_bL_switcher) {
		ret = bL_switcher_enable();
		if (ret)
			return ret;
	}

#ifdef CONFIG_SYSFS
	ret = bL_switcher_sysfs_init();
	if (ret)
		pr_err("%s: unable to create sysfs entry\n", __func__);
#endif

	return 0;
}

late_initcall(bL_switcher_init);