blob: e899a5446d0e73c2c3916f2e0513929686720f26 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
Viresh Kumarbb176f72013-06-19 14:19:33 +05306 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
Linus Torvalds1da177e2005-04-16 15:20:36 -07007 *
Ashok Rajc32b6b82005-10-30 14:59:54 -08008 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
Dave Jones32ee8c32006-02-28 00:43:23 -05009 * Added handling for CPU hotplug
Dave Jones8ff69732006-03-05 03:37:23 -050010 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
11 * Fix handling for CPU hotplug -- affected CPUs
Ashok Rajc32b6b82005-10-30 14:59:54 -080012 *
Linus Torvalds1da177e2005-04-16 15:20:36 -070013 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
Linus Torvalds1da177e2005-04-16 15:20:36 -070016 */
17
Viresh Kumardb701152012-10-23 01:29:03 +020018#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
Viresh Kumar5ff0a262013-08-06 22:53:03 +053020#include <linux/cpu.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070021#include <linux/cpufreq.h>
22#include <linux/delay.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070023#include <linux/device.h>
Viresh Kumar5ff0a262013-08-06 22:53:03 +053024#include <linux/init.h>
25#include <linux/kernel_stat.h>
26#include <linux/module.h>
akpm@osdl.org3fc54d32006-01-13 15:54:22 -080027#include <linux/mutex.h>
Viresh Kumar5ff0a262013-08-06 22:53:03 +053028#include <linux/slab.h>
Viresh Kumar2f0aea92014-03-04 11:00:26 +080029#include <linux/suspend.h>
Doug Anderson90de2a42014-12-23 22:09:48 -080030#include <linux/syscore_ops.h>
Viresh Kumar5ff0a262013-08-06 22:53:03 +053031#include <linux/tick.h>
Thomas Renninger6f4f2722010-04-20 13:17:36 +020032#include <trace/events/power.h>
33
Viresh Kumarb4f06762015-01-27 14:06:08 +053034static LIST_HEAD(cpufreq_policy_list);
Viresh Kumarf9637352015-05-12 12:20:11 +053035
36static inline bool policy_is_inactive(struct cpufreq_policy *policy)
37{
38 return cpumask_empty(policy->cpus);
39}
40
41static bool suitable_policy(struct cpufreq_policy *policy, bool active)
42{
43 return active == !policy_is_inactive(policy);
44}
45
46/* Finds Next Acive/Inactive policy */
47static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy,
48 bool active)
49{
50 do {
51 policy = list_next_entry(policy, policy_list);
52
53 /* No more policies in the list */
54 if (&policy->policy_list == &cpufreq_policy_list)
55 return NULL;
56 } while (!suitable_policy(policy, active));
57
58 return policy;
59}
60
61static struct cpufreq_policy *first_policy(bool active)
62{
63 struct cpufreq_policy *policy;
64
65 /* No policies in the list */
66 if (list_empty(&cpufreq_policy_list))
67 return NULL;
68
69 policy = list_first_entry(&cpufreq_policy_list, typeof(*policy),
70 policy_list);
71
72 if (!suitable_policy(policy, active))
73 policy = next_policy(policy, active);
74
75 return policy;
76}
77
78/* Macros to iterate over CPU policies */
79#define for_each_suitable_policy(__policy, __active) \
80 for (__policy = first_policy(__active); \
81 __policy; \
82 __policy = next_policy(__policy, __active))
83
84#define for_each_active_policy(__policy) \
85 for_each_suitable_policy(__policy, true)
86#define for_each_inactive_policy(__policy) \
87 for_each_suitable_policy(__policy, false)
88
89#define for_each_policy(__policy) \
Viresh Kumarb4f06762015-01-27 14:06:08 +053090 list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)
91
Viresh Kumarf7b27062015-01-27 14:06:09 +053092/* Iterate over governors */
93static LIST_HEAD(cpufreq_governor_list);
94#define for_each_governor(__governor) \
95 list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
96
Linus Torvalds1da177e2005-04-16 15:20:36 -070097/**
Dave Jonescd878472006-08-11 17:59:28 -040098 * The "cpufreq driver" - the arch- or hardware-dependent low
Linus Torvalds1da177e2005-04-16 15:20:36 -070099 * level driver of CPUFreq support, and its spinlock. This lock
100 * also protects the cpufreq_cpu_data array.
101 */
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +0200102static struct cpufreq_driver *cpufreq_driver;
Mike Travis7a6aedf2008-03-25 15:06:53 -0700103static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
Srivatsa S. Bhat84148092013-07-30 04:25:10 +0530104static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
Viresh Kumarbb176f72013-06-19 14:19:33 +0530105static DEFINE_RWLOCK(cpufreq_driver_lock);
Jane Li6f1e4ef2014-01-03 17:17:41 +0800106DEFINE_MUTEX(cpufreq_governor_lock);
Viresh Kumarbb176f72013-06-19 14:19:33 +0530107
Thomas Renninger084f3492007-07-09 11:35:28 -0700108/* This one keeps track of the previously set governor of a removed CPU */
Dmitry Monakhove77b89f2009-10-05 00:38:55 +0400109static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700110
Viresh Kumar2f0aea92014-03-04 11:00:26 +0800111/* Flag to suspend/resume CPUFreq governors */
112static bool cpufreq_suspended;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700113
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +0530114static inline bool has_target(void)
115{
116 return cpufreq_driver->target_index || cpufreq_driver->target;
117}
118
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -0800119/*
Viresh Kumar6eed9402013-08-06 22:53:11 +0530120 * rwsem to guarantee that cpufreq driver module doesn't unload during critical
121 * sections
122 */
123static DECLARE_RWSEM(cpufreq_rwsem);
124
Linus Torvalds1da177e2005-04-16 15:20:36 -0700125/* internal prototypes */
Dave Jones29464f22009-01-18 01:37:11 -0500126static int __cpufreq_governor(struct cpufreq_policy *policy,
127 unsigned int event);
Viresh Kumard92d50a2015-01-02 12:34:29 +0530128static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
David Howells65f27f32006-11-22 14:55:48 +0000129static void handle_update(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700130
131/**
Dave Jones32ee8c32006-02-28 00:43:23 -0500132 * Two notifier lists: the "policy" list is involved in the
133 * validation process for a new CPU frequency policy; the
Linus Torvalds1da177e2005-04-16 15:20:36 -0700134 * "transition" list for kernel code that needs to handle
135 * changes to devices when the CPU clock speed changes.
136 * The mutex locks both lists.
137 */
Alan Sterne041c682006-03-27 01:16:30 -0800138static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
Alan Sternb4dfdbb2006-10-04 02:17:06 -0700139static struct srcu_notifier_head cpufreq_transition_notifier_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700140
Cesar Eduardo Barros74212ca2008-02-16 08:41:24 -0200141static bool init_cpufreq_transition_notifier_list_called;
Alan Sternb4dfdbb2006-10-04 02:17:06 -0700142static int __init init_cpufreq_transition_notifier_list(void)
143{
144 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
Cesar Eduardo Barros74212ca2008-02-16 08:41:24 -0200145 init_cpufreq_transition_notifier_list_called = true;
Alan Sternb4dfdbb2006-10-04 02:17:06 -0700146 return 0;
147}
Linus Torvaldsb3438f82006-11-20 11:47:18 -0800148pure_initcall(init_cpufreq_transition_notifier_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700149
Konrad Rzeszutek Wilka7b422c2012-03-13 19:18:39 -0400150static int off __read_mostly;
Viresh Kumarda584452012-10-26 00:51:32 +0200151static int cpufreq_disabled(void)
Konrad Rzeszutek Wilka7b422c2012-03-13 19:18:39 -0400152{
153 return off;
154}
155void disable_cpufreq(void)
156{
157 off = 1;
158}
Dave Jones29464f22009-01-18 01:37:11 -0500159static DEFINE_MUTEX(cpufreq_governor_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700160
Viresh Kumar4d5dcc42013-03-27 15:58:58 +0000161bool have_governor_per_policy(void)
162{
Viresh Kumar0b981e72013-10-02 14:13:18 +0530163 return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
Viresh Kumar4d5dcc42013-03-27 15:58:58 +0000164}
Viresh Kumar3f869d62013-05-16 05:09:56 +0000165EXPORT_SYMBOL_GPL(have_governor_per_policy);
Viresh Kumar4d5dcc42013-03-27 15:58:58 +0000166
Viresh Kumar944e9a02013-05-16 05:09:57 +0000167struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
168{
169 if (have_governor_per_policy())
170 return &policy->kobj;
171 else
172 return cpufreq_global_kobject;
173}
174EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
175
Viresh Kumar72a4ce32013-05-17 11:26:32 +0000176static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
177{
178 u64 idle_time;
179 u64 cur_wall_time;
180 u64 busy_time;
181
182 cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
183
184 busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
185 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
186 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
187 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
188 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
189 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
190
191 idle_time = cur_wall_time - busy_time;
192 if (wall)
193 *wall = cputime_to_usecs(cur_wall_time);
194
195 return cputime_to_usecs(idle_time);
196}
197
198u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
199{
200 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
201
202 if (idle_time == -1ULL)
203 return get_cpu_idle_time_jiffy(cpu, wall);
204 else if (!io_busy)
205 idle_time += get_cpu_iowait_time_us(cpu, wall);
206
207 return idle_time;
208}
209EXPORT_SYMBOL_GPL(get_cpu_idle_time);
210
Viresh Kumar70e9e772013-10-03 20:29:07 +0530211/*
212 * This is a generic cpufreq init() routine which can be used by cpufreq
213 * drivers of SMP systems. It will do following:
214 * - validate & show freq table passed
215 * - set policies transition latency
216 * - policy->cpus with all possible CPUs
217 */
218int cpufreq_generic_init(struct cpufreq_policy *policy,
219 struct cpufreq_frequency_table *table,
220 unsigned int transition_latency)
221{
222 int ret;
223
224 ret = cpufreq_table_validate_and_show(policy, table);
225 if (ret) {
226 pr_err("%s: invalid frequency table: %d\n", __func__, ret);
227 return ret;
228 }
229
230 policy->cpuinfo.transition_latency = transition_latency;
231
232 /*
233 * The driver only supports the SMP configuartion where all processors
234 * share the clock and voltage and clock.
235 */
236 cpumask_setall(policy->cpus);
237
238 return 0;
239}
240EXPORT_SYMBOL_GPL(cpufreq_generic_init);
241
Viresh Kumar988bed02015-05-08 11:53:45 +0530242/* Only for cpufreq core internal use */
243struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
Viresh Kumar652ed952014-01-09 20:38:43 +0530244{
245 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
246
Viresh Kumar988bed02015-05-08 11:53:45 +0530247 return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
248}
249
250unsigned int cpufreq_generic_get(unsigned int cpu)
251{
252 struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
253
Viresh Kumar652ed952014-01-09 20:38:43 +0530254 if (!policy || IS_ERR(policy->clk)) {
Joe Perchese837f9b2014-03-11 10:03:00 -0700255 pr_err("%s: No %s associated to cpu: %d\n",
256 __func__, policy ? "clk" : "policy", cpu);
Viresh Kumar652ed952014-01-09 20:38:43 +0530257 return 0;
258 }
259
260 return clk_get_rate(policy->clk) / 1000;
261}
262EXPORT_SYMBOL_GPL(cpufreq_generic_get);
263
Viresh Kumar50e9c852015-02-19 17:02:03 +0530264/**
265 * cpufreq_cpu_get: returns policy for a cpu and marks it busy.
266 *
267 * @cpu: cpu to find policy for.
268 *
269 * This returns policy for 'cpu', returns NULL if it doesn't exist.
270 * It also increments the kobject reference count to mark it busy and so would
271 * require a corresponding call to cpufreq_cpu_put() to decrement it back.
272 * If corresponding call cpufreq_cpu_put() isn't made, the policy wouldn't be
273 * freed as that depends on the kobj count.
274 *
275 * It also takes a read-lock of 'cpufreq_rwsem' and doesn't put it back if a
276 * valid policy is found. This is done to make sure the driver doesn't get
277 * unregistered while the policy is being used.
278 *
279 * Return: A valid policy on success, otherwise NULL on failure.
280 */
Viresh Kumar6eed9402013-08-06 22:53:11 +0530281struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700282{
Viresh Kumar6eed9402013-08-06 22:53:11 +0530283 struct cpufreq_policy *policy = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700284 unsigned long flags;
285
Viresh Kumar1b947c902015-02-19 17:02:05 +0530286 if (WARN_ON(cpu >= nr_cpu_ids))
Viresh Kumar6eed9402013-08-06 22:53:11 +0530287 return NULL;
288
289 if (!down_read_trylock(&cpufreq_rwsem))
290 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700291
292 /* get the cpufreq driver */
Nathan Zimmer0d1857a2013-02-22 16:24:34 +0000293 read_lock_irqsave(&cpufreq_driver_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700294
Viresh Kumar6eed9402013-08-06 22:53:11 +0530295 if (cpufreq_driver) {
296 /* get the CPU */
Viresh Kumar988bed02015-05-08 11:53:45 +0530297 policy = cpufreq_cpu_get_raw(cpu);
Viresh Kumar6eed9402013-08-06 22:53:11 +0530298 if (policy)
299 kobject_get(&policy->kobj);
300 }
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +0200301
Viresh Kumar6eed9402013-08-06 22:53:11 +0530302 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700303
Viresh Kumar3a3e9e02013-08-06 22:53:05 +0530304 if (!policy)
Viresh Kumar6eed9402013-08-06 22:53:11 +0530305 up_read(&cpufreq_rwsem);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700306
Viresh Kumar3a3e9e02013-08-06 22:53:05 +0530307 return policy;
Stephen Boyda9144432012-07-20 18:14:38 +0000308}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700309EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
310
Viresh Kumar50e9c852015-02-19 17:02:03 +0530311/**
312 * cpufreq_cpu_put: Decrements the usage count of a policy
313 *
314 * @policy: policy earlier returned by cpufreq_cpu_get().
315 *
316 * This decrements the kobject reference count incremented earlier by calling
317 * cpufreq_cpu_get().
318 *
319 * It also drops the read-lock of 'cpufreq_rwsem' taken at cpufreq_cpu_get().
320 */
Viresh Kumar3a3e9e02013-08-06 22:53:05 +0530321void cpufreq_cpu_put(struct cpufreq_policy *policy)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700322{
Viresh Kumar6eed9402013-08-06 22:53:11 +0530323 kobject_put(&policy->kobj);
324 up_read(&cpufreq_rwsem);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700325}
326EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
327
Linus Torvalds1da177e2005-04-16 15:20:36 -0700328/*********************************************************************
Linus Torvalds1da177e2005-04-16 15:20:36 -0700329 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
330 *********************************************************************/
331
332/**
333 * adjust_jiffies - adjust the system "loops_per_jiffy"
334 *
335 * This function alters the system "loops_per_jiffy" for the clock
336 * speed change. Note that loops_per_jiffy cannot be updated on SMP
Dave Jones32ee8c32006-02-28 00:43:23 -0500337 * systems as each CPU might be scaled differently. So, use the arch
Linus Torvalds1da177e2005-04-16 15:20:36 -0700338 * per-CPU loops_per_jiffy value wherever possible.
339 */
Arjan van de Ven858119e2006-01-14 13:20:43 -0800340static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700341{
Viresh Kumar39c132e2015-01-02 12:34:34 +0530342#ifndef CONFIG_SMP
343 static unsigned long l_p_j_ref;
344 static unsigned int l_p_j_ref_freq;
345
Linus Torvalds1da177e2005-04-16 15:20:36 -0700346 if (ci->flags & CPUFREQ_CONST_LOOPS)
347 return;
348
349 if (!l_p_j_ref_freq) {
350 l_p_j_ref = loops_per_jiffy;
351 l_p_j_ref_freq = ci->old;
Joe Perchese837f9b2014-03-11 10:03:00 -0700352 pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
353 l_p_j_ref, l_p_j_ref_freq);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700354 }
Viresh Kumar0b443ea2014-03-19 11:24:58 +0530355 if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
Gautham R Shenoye08f5f52006-10-26 16:20:58 +0530356 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
357 ci->new);
Joe Perchese837f9b2014-03-11 10:03:00 -0700358 pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
359 loops_per_jiffy, ci->new);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700360 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700361#endif
Viresh Kumar39c132e2015-01-02 12:34:34 +0530362}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700363
Viresh Kumar0956df9c2013-06-19 14:19:34 +0530364static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
Viresh Kumarb43a7ff2013-03-24 11:56:43 +0530365 struct cpufreq_freqs *freqs, unsigned int state)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700366{
367 BUG_ON(irqs_disabled());
368
Dirk Brandewied5aaffa2013-01-17 16:22:21 +0000369 if (cpufreq_disabled())
370 return;
371
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +0200372 freqs->flags = cpufreq_driver->flags;
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +0200373 pr_debug("notification %u of frequency transition to %u kHz\n",
Joe Perchese837f9b2014-03-11 10:03:00 -0700374 state, freqs->new);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700375
Linus Torvalds1da177e2005-04-16 15:20:36 -0700376 switch (state) {
Dave Jonese4472cb2006-01-31 15:53:55 -0800377
Linus Torvalds1da177e2005-04-16 15:20:36 -0700378 case CPUFREQ_PRECHANGE:
Dave Jones32ee8c32006-02-28 00:43:23 -0500379 /* detect if the driver reported a value as "old frequency"
Dave Jonese4472cb2006-01-31 15:53:55 -0800380 * which is not equal to what the cpufreq core thinks is
381 * "old frequency".
Linus Torvalds1da177e2005-04-16 15:20:36 -0700382 */
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +0200383 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
Dave Jonese4472cb2006-01-31 15:53:55 -0800384 if ((policy) && (policy->cpu == freqs->cpu) &&
385 (policy->cur) && (policy->cur != freqs->old)) {
Joe Perchese837f9b2014-03-11 10:03:00 -0700386 pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
387 freqs->old, policy->cur);
Dave Jonese4472cb2006-01-31 15:53:55 -0800388 freqs->old = policy->cur;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700389 }
390 }
Alan Sternb4dfdbb2006-10-04 02:17:06 -0700391 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
Alan Sterne041c682006-03-27 01:16:30 -0800392 CPUFREQ_PRECHANGE, freqs);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700393 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
394 break;
Dave Jonese4472cb2006-01-31 15:53:55 -0800395
Linus Torvalds1da177e2005-04-16 15:20:36 -0700396 case CPUFREQ_POSTCHANGE:
397 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
Joe Perchese837f9b2014-03-11 10:03:00 -0700398 pr_debug("FREQ: %lu - CPU: %lu\n",
399 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
Thomas Renninger25e41932011-01-03 17:50:44 +0100400 trace_cpu_frequency(freqs->new, freqs->cpu);
Alan Sternb4dfdbb2006-10-04 02:17:06 -0700401 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
Alan Sterne041c682006-03-27 01:16:30 -0800402 CPUFREQ_POSTCHANGE, freqs);
Dave Jonese4472cb2006-01-31 15:53:55 -0800403 if (likely(policy) && likely(policy->cpu == freqs->cpu))
404 policy->cur = freqs->new;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700405 break;
406 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700407}
Viresh Kumarbb176f72013-06-19 14:19:33 +0530408
Viresh Kumarb43a7ff2013-03-24 11:56:43 +0530409/**
410 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
411 * on frequency transition.
412 *
413 * This function calls the transition notifiers and the "adjust_jiffies"
414 * function. It is called twice on all CPU frequency changes that have
415 * external effects.
416 */
Viresh Kumar236a9802014-03-24 13:35:46 +0530417static void cpufreq_notify_transition(struct cpufreq_policy *policy,
Viresh Kumarb43a7ff2013-03-24 11:56:43 +0530418 struct cpufreq_freqs *freqs, unsigned int state)
419{
420 for_each_cpu(freqs->cpu, policy->cpus)
421 __cpufreq_notify_transition(policy, freqs, state);
422}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700423
Viresh Kumarf7ba3b42013-12-02 11:04:12 +0530424/* Do post notifications when there are chances that transition has failed */
Viresh Kumar236a9802014-03-24 13:35:46 +0530425static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
Viresh Kumarf7ba3b42013-12-02 11:04:12 +0530426 struct cpufreq_freqs *freqs, int transition_failed)
427{
428 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
429 if (!transition_failed)
430 return;
431
432 swap(freqs->old, freqs->new);
433 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
434 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
435}
Viresh Kumarf7ba3b42013-12-02 11:04:12 +0530436
Srivatsa S. Bhat12478cf2014-03-24 13:35:44 +0530437void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
438 struct cpufreq_freqs *freqs)
439{
Srivatsa S. Bhatca654dc2014-05-05 12:52:39 +0530440
441 /*
442 * Catch double invocations of _begin() which lead to self-deadlock.
443 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
444 * doesn't invoke _begin() on their behalf, and hence the chances of
445 * double invocations are very low. Moreover, there are scenarios
446 * where these checks can emit false-positive warnings in these
447 * drivers; so we avoid that by skipping them altogether.
448 */
449 WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
450 && current == policy->transition_task);
451
Srivatsa S. Bhat12478cf2014-03-24 13:35:44 +0530452wait:
453 wait_event(policy->transition_wait, !policy->transition_ongoing);
454
455 spin_lock(&policy->transition_lock);
456
457 if (unlikely(policy->transition_ongoing)) {
458 spin_unlock(&policy->transition_lock);
459 goto wait;
460 }
461
462 policy->transition_ongoing = true;
Srivatsa S. Bhatca654dc2014-05-05 12:52:39 +0530463 policy->transition_task = current;
Srivatsa S. Bhat12478cf2014-03-24 13:35:44 +0530464
465 spin_unlock(&policy->transition_lock);
466
467 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
468}
469EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
470
471void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
472 struct cpufreq_freqs *freqs, int transition_failed)
473{
474 if (unlikely(WARN_ON(!policy->transition_ongoing)))
475 return;
476
477 cpufreq_notify_post_transition(policy, freqs, transition_failed);
478
479 policy->transition_ongoing = false;
Srivatsa S. Bhatca654dc2014-05-05 12:52:39 +0530480 policy->transition_task = NULL;
Srivatsa S. Bhat12478cf2014-03-24 13:35:44 +0530481
482 wake_up(&policy->transition_wait);
483}
484EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
485
Linus Torvalds1da177e2005-04-16 15:20:36 -0700486
Linus Torvalds1da177e2005-04-16 15:20:36 -0700487/*********************************************************************
488 * SYSFS INTERFACE *
489 *********************************************************************/
Rashika Kheria8a5c74a2014-02-26 22:12:42 +0530490static ssize_t show_boost(struct kobject *kobj,
Lukasz Majewski6f19efc2013-12-20 15:24:49 +0100491 struct attribute *attr, char *buf)
492{
493 return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
494}
495
496static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
497 const char *buf, size_t count)
498{
499 int ret, enable;
500
501 ret = sscanf(buf, "%d", &enable);
502 if (ret != 1 || enable < 0 || enable > 1)
503 return -EINVAL;
504
505 if (cpufreq_boost_trigger_state(enable)) {
Joe Perchese837f9b2014-03-11 10:03:00 -0700506 pr_err("%s: Cannot %s BOOST!\n",
507 __func__, enable ? "enable" : "disable");
Lukasz Majewski6f19efc2013-12-20 15:24:49 +0100508 return -EINVAL;
509 }
510
Joe Perchese837f9b2014-03-11 10:03:00 -0700511 pr_debug("%s: cpufreq BOOST %s\n",
512 __func__, enable ? "enabled" : "disabled");
Lukasz Majewski6f19efc2013-12-20 15:24:49 +0100513
514 return count;
515}
516define_one_global_rw(boost);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700517
Viresh Kumar42f91fa2015-01-02 12:34:26 +0530518static struct cpufreq_governor *find_governor(const char *str_governor)
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -0700519{
520 struct cpufreq_governor *t;
521
Viresh Kumarf7b27062015-01-27 14:06:09 +0530522 for_each_governor(t)
Rasmus Villemoes7c4f4532014-09-29 15:50:11 +0200523 if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -0700524 return t;
525
526 return NULL;
527}
528
Linus Torvalds1da177e2005-04-16 15:20:36 -0700529/**
530 * cpufreq_parse_governor - parse a governor string
531 */
Dave Jones905d77c2008-03-05 14:28:32 -0500532static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700533 struct cpufreq_governor **governor)
534{
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -0700535 int err = -EINVAL;
536
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +0200537 if (!cpufreq_driver)
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -0700538 goto out;
539
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +0200540 if (cpufreq_driver->setpolicy) {
Rasmus Villemoes7c4f4532014-09-29 15:50:11 +0200541 if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700542 *policy = CPUFREQ_POLICY_PERFORMANCE;
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -0700543 err = 0;
Rasmus Villemoes7c4f4532014-09-29 15:50:11 +0200544 } else if (!strncasecmp(str_governor, "powersave",
Gautham R Shenoye08f5f52006-10-26 16:20:58 +0530545 CPUFREQ_NAME_LEN)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700546 *policy = CPUFREQ_POLICY_POWERSAVE;
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -0700547 err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700548 }
Viresh Kumar2e1cc3a2015-01-02 12:34:27 +0530549 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700550 struct cpufreq_governor *t;
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -0700551
akpm@osdl.org3fc54d32006-01-13 15:54:22 -0800552 mutex_lock(&cpufreq_governor_mutex);
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -0700553
Viresh Kumar42f91fa2015-01-02 12:34:26 +0530554 t = find_governor(str_governor);
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -0700555
Jeremy Fitzhardingeea714972006-07-06 12:32:01 -0700556 if (t == NULL) {
Kees Cook1a8e1462011-05-04 08:38:56 -0700557 int ret;
Jeremy Fitzhardingeea714972006-07-06 12:32:01 -0700558
Kees Cook1a8e1462011-05-04 08:38:56 -0700559 mutex_unlock(&cpufreq_governor_mutex);
560 ret = request_module("cpufreq_%s", str_governor);
561 mutex_lock(&cpufreq_governor_mutex);
Jeremy Fitzhardingeea714972006-07-06 12:32:01 -0700562
Kees Cook1a8e1462011-05-04 08:38:56 -0700563 if (ret == 0)
Viresh Kumar42f91fa2015-01-02 12:34:26 +0530564 t = find_governor(str_governor);
Jeremy Fitzhardingeea714972006-07-06 12:32:01 -0700565 }
566
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -0700567 if (t != NULL) {
568 *governor = t;
569 err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700570 }
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -0700571
akpm@osdl.org3fc54d32006-01-13 15:54:22 -0800572 mutex_unlock(&cpufreq_governor_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700573 }
Dave Jones29464f22009-01-18 01:37:11 -0500574out:
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -0700575 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700576}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700577
Linus Torvalds1da177e2005-04-16 15:20:36 -0700578/**
Gautham R Shenoye08f5f52006-10-26 16:20:58 +0530579 * cpufreq_per_cpu_attr_read() / show_##file_name() -
580 * print out cpufreq information
Linus Torvalds1da177e2005-04-16 15:20:36 -0700581 *
582 * Write out information from cpufreq_driver->policy[cpu]; object must be
583 * "unsigned int".
584 */
585
Dave Jones32ee8c32006-02-28 00:43:23 -0500586#define show_one(file_name, object) \
587static ssize_t show_##file_name \
Dave Jones905d77c2008-03-05 14:28:32 -0500588(struct cpufreq_policy *policy, char *buf) \
Dave Jones32ee8c32006-02-28 00:43:23 -0500589{ \
Dave Jones29464f22009-01-18 01:37:11 -0500590 return sprintf(buf, "%u\n", policy->object); \
Linus Torvalds1da177e2005-04-16 15:20:36 -0700591}
592
593show_one(cpuinfo_min_freq, cpuinfo.min_freq);
594show_one(cpuinfo_max_freq, cpuinfo.max_freq);
Thomas Renningered129782009-02-04 01:17:41 +0100595show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700596show_one(scaling_min_freq, min);
597show_one(scaling_max_freq, max);
Dirk Brandewiec034b022014-10-13 08:37:40 -0700598
Viresh Kumar09347b22015-01-02 12:34:24 +0530599static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
Dirk Brandewiec034b022014-10-13 08:37:40 -0700600{
601 ssize_t ret;
602
603 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
604 ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
605 else
606 ret = sprintf(buf, "%u\n", policy->cur);
607 return ret;
608}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700609
Viresh Kumar037ce832013-10-02 14:13:16 +0530610static int cpufreq_set_policy(struct cpufreq_policy *policy,
Viresh Kumar3a3e9e02013-08-06 22:53:05 +0530611 struct cpufreq_policy *new_policy);
Thomas Renninger7970e082006-04-13 15:14:04 +0200612
Linus Torvalds1da177e2005-04-16 15:20:36 -0700613/**
614 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
615 */
616#define store_one(file_name, object) \
617static ssize_t store_##file_name \
Dave Jones905d77c2008-03-05 14:28:32 -0500618(struct cpufreq_policy *policy, const char *buf, size_t count) \
Linus Torvalds1da177e2005-04-16 15:20:36 -0700619{ \
Vince Hsu619c144c2014-11-10 14:14:50 +0800620 int ret, temp; \
Linus Torvalds1da177e2005-04-16 15:20:36 -0700621 struct cpufreq_policy new_policy; \
622 \
623 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
624 if (ret) \
625 return -EINVAL; \
626 \
Dave Jones29464f22009-01-18 01:37:11 -0500627 ret = sscanf(buf, "%u", &new_policy.object); \
Linus Torvalds1da177e2005-04-16 15:20:36 -0700628 if (ret != 1) \
629 return -EINVAL; \
630 \
Vince Hsu619c144c2014-11-10 14:14:50 +0800631 temp = new_policy.object; \
Viresh Kumar037ce832013-10-02 14:13:16 +0530632 ret = cpufreq_set_policy(policy, &new_policy); \
Vince Hsu619c144c2014-11-10 14:14:50 +0800633 if (!ret) \
634 policy->user_policy.object = temp; \
Linus Torvalds1da177e2005-04-16 15:20:36 -0700635 \
636 return ret ? ret : count; \
637}
638
Dave Jones29464f22009-01-18 01:37:11 -0500639store_one(scaling_min_freq, min);
640store_one(scaling_max_freq, max);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700641
642/**
643 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
644 */
Dave Jones905d77c2008-03-05 14:28:32 -0500645static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
646 char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700647{
Viresh Kumard92d50a2015-01-02 12:34:29 +0530648 unsigned int cur_freq = __cpufreq_get(policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700649 if (!cur_freq)
650 return sprintf(buf, "<unknown>");
651 return sprintf(buf, "%u\n", cur_freq);
652}
653
Linus Torvalds1da177e2005-04-16 15:20:36 -0700654/**
655 * show_scaling_governor - show the current policy for the specified CPU
656 */
Dave Jones905d77c2008-03-05 14:28:32 -0500657static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700658{
Dave Jones29464f22009-01-18 01:37:11 -0500659 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700660 return sprintf(buf, "powersave\n");
661 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
662 return sprintf(buf, "performance\n");
663 else if (policy->governor)
viresh kumar4b972f02012-10-23 01:23:43 +0200664 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
Dave Jones29464f22009-01-18 01:37:11 -0500665 policy->governor->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700666 return -EINVAL;
667}
668
Linus Torvalds1da177e2005-04-16 15:20:36 -0700669/**
670 * store_scaling_governor - store policy for the specified CPU
671 */
Dave Jones905d77c2008-03-05 14:28:32 -0500672static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
673 const char *buf, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700674{
Srivatsa S. Bhat5136fa52013-09-07 01:24:06 +0530675 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700676 char str_governor[16];
677 struct cpufreq_policy new_policy;
678
679 ret = cpufreq_get_policy(&new_policy, policy->cpu);
680 if (ret)
681 return ret;
682
Dave Jones29464f22009-01-18 01:37:11 -0500683 ret = sscanf(buf, "%15s", str_governor);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700684 if (ret != 1)
685 return -EINVAL;
686
Gautham R Shenoye08f5f52006-10-26 16:20:58 +0530687 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
688 &new_policy.governor))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700689 return -EINVAL;
690
Viresh Kumar037ce832013-10-02 14:13:16 +0530691 ret = cpufreq_set_policy(policy, &new_policy);
Thomas Renninger7970e082006-04-13 15:14:04 +0200692
693 policy->user_policy.policy = policy->policy;
694 policy->user_policy.governor = policy->governor;
Thomas Renninger7970e082006-04-13 15:14:04 +0200695
Gautham R Shenoye08f5f52006-10-26 16:20:58 +0530696 if (ret)
697 return ret;
698 else
699 return count;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700700}
701
702/**
703 * show_scaling_driver - show the cpufreq driver currently loaded
704 */
Dave Jones905d77c2008-03-05 14:28:32 -0500705static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700706{
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +0200707 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700708}
709
710/**
711 * show_scaling_available_governors - show the available CPUfreq governors
712 */
Dave Jones905d77c2008-03-05 14:28:32 -0500713static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
714 char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700715{
716 ssize_t i = 0;
717 struct cpufreq_governor *t;
718
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +0530719 if (!has_target()) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700720 i += sprintf(buf, "performance powersave");
721 goto out;
722 }
723
Viresh Kumarf7b27062015-01-27 14:06:09 +0530724 for_each_governor(t) {
Dave Jones29464f22009-01-18 01:37:11 -0500725 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
726 - (CPUFREQ_NAME_LEN + 2)))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700727 goto out;
viresh kumar4b972f02012-10-23 01:23:43 +0200728 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700729 }
Dave Jones7d5e3502006-02-02 17:03:42 -0500730out:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700731 i += sprintf(&buf[i], "\n");
732 return i;
733}
Darrick J. Wonge8628dd2008-04-18 13:31:12 -0700734
Lan Tianyuf4fd3792013-06-27 15:08:54 +0800735ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700736{
737 ssize_t i = 0;
738 unsigned int cpu;
739
Rusty Russell835481d2009-01-04 05:18:06 -0800740 for_each_cpu(cpu, mask) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700741 if (i)
742 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
743 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
744 if (i >= (PAGE_SIZE - 5))
Dave Jones29464f22009-01-18 01:37:11 -0500745 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700746 }
747 i += sprintf(&buf[i], "\n");
748 return i;
749}
Lan Tianyuf4fd3792013-06-27 15:08:54 +0800750EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700751
Darrick J. Wonge8628dd2008-04-18 13:31:12 -0700752/**
753 * show_related_cpus - show the CPUs affected by each transition even if
754 * hw coordination is in use
755 */
756static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
757{
Lan Tianyuf4fd3792013-06-27 15:08:54 +0800758 return cpufreq_show_cpus(policy->related_cpus, buf);
Darrick J. Wonge8628dd2008-04-18 13:31:12 -0700759}
760
761/**
762 * show_affected_cpus - show the CPUs affected by each transition
763 */
764static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
765{
Lan Tianyuf4fd3792013-06-27 15:08:54 +0800766 return cpufreq_show_cpus(policy->cpus, buf);
Darrick J. Wonge8628dd2008-04-18 13:31:12 -0700767}
768
Venki Pallipadi9e769882007-10-26 10:18:21 -0700769static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
Dave Jones905d77c2008-03-05 14:28:32 -0500770 const char *buf, size_t count)
Venki Pallipadi9e769882007-10-26 10:18:21 -0700771{
772 unsigned int freq = 0;
773 unsigned int ret;
774
CHIKAMA masaki879000f2008-06-05 22:46:33 -0700775 if (!policy->governor || !policy->governor->store_setspeed)
Venki Pallipadi9e769882007-10-26 10:18:21 -0700776 return -EINVAL;
777
778 ret = sscanf(buf, "%u", &freq);
779 if (ret != 1)
780 return -EINVAL;
781
782 policy->governor->store_setspeed(policy, freq);
783
784 return count;
785}
786
787static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
788{
CHIKAMA masaki879000f2008-06-05 22:46:33 -0700789 if (!policy->governor || !policy->governor->show_setspeed)
Venki Pallipadi9e769882007-10-26 10:18:21 -0700790 return sprintf(buf, "<unsupported>\n");
791
792 return policy->governor->show_setspeed(policy, buf);
793}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700794
Thomas Renningere2f74f32009-11-19 12:31:01 +0100795/**
viresh kumar8bf1ac722012-10-23 01:23:33 +0200796 * show_bios_limit - show the current cpufreq HW/BIOS limitation
Thomas Renningere2f74f32009-11-19 12:31:01 +0100797 */
798static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
799{
800 unsigned int limit;
801 int ret;
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +0200802 if (cpufreq_driver->bios_limit) {
803 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
Thomas Renningere2f74f32009-11-19 12:31:01 +0100804 if (!ret)
805 return sprintf(buf, "%u\n", limit);
806 }
807 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
808}
809
Borislav Petkov6dad2a22010-03-31 21:56:46 +0200810cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
811cpufreq_freq_attr_ro(cpuinfo_min_freq);
812cpufreq_freq_attr_ro(cpuinfo_max_freq);
813cpufreq_freq_attr_ro(cpuinfo_transition_latency);
814cpufreq_freq_attr_ro(scaling_available_governors);
815cpufreq_freq_attr_ro(scaling_driver);
816cpufreq_freq_attr_ro(scaling_cur_freq);
817cpufreq_freq_attr_ro(bios_limit);
818cpufreq_freq_attr_ro(related_cpus);
819cpufreq_freq_attr_ro(affected_cpus);
820cpufreq_freq_attr_rw(scaling_min_freq);
821cpufreq_freq_attr_rw(scaling_max_freq);
822cpufreq_freq_attr_rw(scaling_governor);
823cpufreq_freq_attr_rw(scaling_setspeed);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700824
Dave Jones905d77c2008-03-05 14:28:32 -0500825static struct attribute *default_attrs[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700826 &cpuinfo_min_freq.attr,
827 &cpuinfo_max_freq.attr,
Thomas Renningered129782009-02-04 01:17:41 +0100828 &cpuinfo_transition_latency.attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700829 &scaling_min_freq.attr,
830 &scaling_max_freq.attr,
831 &affected_cpus.attr,
Darrick J. Wonge8628dd2008-04-18 13:31:12 -0700832 &related_cpus.attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700833 &scaling_governor.attr,
834 &scaling_driver.attr,
835 &scaling_available_governors.attr,
Venki Pallipadi9e769882007-10-26 10:18:21 -0700836 &scaling_setspeed.attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700837 NULL
838};
839
Dave Jones29464f22009-01-18 01:37:11 -0500840#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
841#define to_attr(a) container_of(a, struct freq_attr, attr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700842
Dave Jones29464f22009-01-18 01:37:11 -0500843static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700844{
Dave Jones905d77c2008-03-05 14:28:32 -0500845 struct cpufreq_policy *policy = to_policy(kobj);
846 struct freq_attr *fattr = to_attr(attr);
Viresh Kumar1b750e32013-10-02 14:13:09 +0530847 ssize_t ret;
Viresh Kumar6eed9402013-08-06 22:53:11 +0530848
849 if (!down_read_trylock(&cpufreq_rwsem))
Viresh Kumar1b750e32013-10-02 14:13:09 +0530850 return -EINVAL;
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -0800851
viresh kumarad7722d2013-10-18 19:10:15 +0530852 down_read(&policy->rwsem);
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -0800853
Gautham R Shenoye08f5f52006-10-26 16:20:58 +0530854 if (fattr->show)
855 ret = fattr->show(policy, buf);
856 else
857 ret = -EIO;
858
viresh kumarad7722d2013-10-18 19:10:15 +0530859 up_read(&policy->rwsem);
Viresh Kumar6eed9402013-08-06 22:53:11 +0530860 up_read(&cpufreq_rwsem);
Viresh Kumar1b750e32013-10-02 14:13:09 +0530861
Linus Torvalds1da177e2005-04-16 15:20:36 -0700862 return ret;
863}
864
Dave Jones905d77c2008-03-05 14:28:32 -0500865static ssize_t store(struct kobject *kobj, struct attribute *attr,
866 const char *buf, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700867{
Dave Jones905d77c2008-03-05 14:28:32 -0500868 struct cpufreq_policy *policy = to_policy(kobj);
869 struct freq_attr *fattr = to_attr(attr);
Dave Jonesa07530b2008-03-05 14:22:25 -0500870 ssize_t ret = -EINVAL;
Viresh Kumar6eed9402013-08-06 22:53:11 +0530871
Srivatsa S. Bhat4f750c92013-09-07 01:23:43 +0530872 get_online_cpus();
873
874 if (!cpu_online(policy->cpu))
875 goto unlock;
876
Viresh Kumar6eed9402013-08-06 22:53:11 +0530877 if (!down_read_trylock(&cpufreq_rwsem))
Srivatsa S. Bhat4f750c92013-09-07 01:23:43 +0530878 goto unlock;
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -0800879
viresh kumarad7722d2013-10-18 19:10:15 +0530880 down_write(&policy->rwsem);
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -0800881
Gautham R Shenoye08f5f52006-10-26 16:20:58 +0530882 if (fattr->store)
883 ret = fattr->store(policy, buf, count);
884 else
885 ret = -EIO;
886
viresh kumarad7722d2013-10-18 19:10:15 +0530887 up_write(&policy->rwsem);
Viresh Kumar6eed9402013-08-06 22:53:11 +0530888
Viresh Kumar6eed9402013-08-06 22:53:11 +0530889 up_read(&cpufreq_rwsem);
Srivatsa S. Bhat4f750c92013-09-07 01:23:43 +0530890unlock:
891 put_online_cpus();
892
Linus Torvalds1da177e2005-04-16 15:20:36 -0700893 return ret;
894}
895
Dave Jones905d77c2008-03-05 14:28:32 -0500896static void cpufreq_sysfs_release(struct kobject *kobj)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700897{
Dave Jones905d77c2008-03-05 14:28:32 -0500898 struct cpufreq_policy *policy = to_policy(kobj);
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +0200899 pr_debug("last reference is dropped\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700900 complete(&policy->kobj_unregister);
901}
902
Emese Revfy52cf25d2010-01-19 02:58:23 +0100903static const struct sysfs_ops sysfs_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700904 .show = show,
905 .store = store,
906};
907
908static struct kobj_type ktype_cpufreq = {
909 .sysfs_ops = &sysfs_ops,
910 .default_attrs = default_attrs,
911 .release = cpufreq_sysfs_release,
912};
913
Viresh Kumar2361be22013-05-17 16:09:09 +0530914struct kobject *cpufreq_global_kobject;
915EXPORT_SYMBOL(cpufreq_global_kobject);
916
917static int cpufreq_global_kobject_usage;
918
919int cpufreq_get_global_kobject(void)
920{
921 if (!cpufreq_global_kobject_usage++)
922 return kobject_add(cpufreq_global_kobject,
923 &cpu_subsys.dev_root->kobj, "%s", "cpufreq");
924
925 return 0;
926}
927EXPORT_SYMBOL(cpufreq_get_global_kobject);
928
929void cpufreq_put_global_kobject(void)
930{
931 if (!--cpufreq_global_kobject_usage)
932 kobject_del(cpufreq_global_kobject);
933}
934EXPORT_SYMBOL(cpufreq_put_global_kobject);
935
936int cpufreq_sysfs_create_file(const struct attribute *attr)
937{
938 int ret = cpufreq_get_global_kobject();
939
940 if (!ret) {
941 ret = sysfs_create_file(cpufreq_global_kobject, attr);
942 if (ret)
943 cpufreq_put_global_kobject();
944 }
945
946 return ret;
947}
948EXPORT_SYMBOL(cpufreq_sysfs_create_file);
949
950void cpufreq_sysfs_remove_file(const struct attribute *attr)
951{
952 sysfs_remove_file(cpufreq_global_kobject, attr);
953 cpufreq_put_global_kobject();
954}
955EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
956
Dave Jones19d6f7e2009-07-08 17:35:39 -0400957/* symlink affected CPUs */
Viresh Kumar308b60e2013-07-31 14:35:14 +0200958static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
Dave Jones19d6f7e2009-07-08 17:35:39 -0400959{
960 unsigned int j;
961 int ret = 0;
962
963 for_each_cpu(j, policy->cpus) {
Kay Sievers8a25a2f2011-12-21 14:29:42 -0800964 struct device *cpu_dev;
Dave Jones19d6f7e2009-07-08 17:35:39 -0400965
Viresh Kumar308b60e2013-07-31 14:35:14 +0200966 if (j == policy->cpu)
Dave Jones19d6f7e2009-07-08 17:35:39 -0400967 continue;
Dave Jones19d6f7e2009-07-08 17:35:39 -0400968
Viresh Kumare8fdde12013-07-31 14:31:33 +0200969 pr_debug("Adding link for CPU: %u\n", j);
Kay Sievers8a25a2f2011-12-21 14:29:42 -0800970 cpu_dev = get_cpu_device(j);
971 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
Dave Jones19d6f7e2009-07-08 17:35:39 -0400972 "cpufreq");
Rafael J. Wysocki71c34612013-08-04 01:19:34 +0200973 if (ret)
974 break;
Dave Jones19d6f7e2009-07-08 17:35:39 -0400975 }
976 return ret;
977}
978
Viresh Kumar308b60e2013-07-31 14:35:14 +0200979static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
Kay Sievers8a25a2f2011-12-21 14:29:42 -0800980 struct device *dev)
Dave Jones909a6942009-07-08 18:05:42 -0400981{
982 struct freq_attr **drv_attr;
Dave Jones909a6942009-07-08 18:05:42 -0400983 int ret = 0;
Dave Jones909a6942009-07-08 18:05:42 -0400984
Dave Jones909a6942009-07-08 18:05:42 -0400985 /* set up files for this cpu device */
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +0200986 drv_attr = cpufreq_driver->attr;
Viresh Kumarf13f1182015-01-02 12:34:23 +0530987 while (drv_attr && *drv_attr) {
Dave Jones909a6942009-07-08 18:05:42 -0400988 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
989 if (ret)
Tomeu Vizoso6d4e81e2014-11-24 10:08:03 +0100990 return ret;
Dave Jones909a6942009-07-08 18:05:42 -0400991 drv_attr++;
992 }
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +0200993 if (cpufreq_driver->get) {
Dave Jones909a6942009-07-08 18:05:42 -0400994 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
995 if (ret)
Tomeu Vizoso6d4e81e2014-11-24 10:08:03 +0100996 return ret;
Dave Jones909a6942009-07-08 18:05:42 -0400997 }
Dirk Brandewiec034b022014-10-13 08:37:40 -0700998
999 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
1000 if (ret)
Tomeu Vizoso6d4e81e2014-11-24 10:08:03 +01001001 return ret;
Dirk Brandewiec034b022014-10-13 08:37:40 -07001002
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001003 if (cpufreq_driver->bios_limit) {
Thomas Renningere2f74f32009-11-19 12:31:01 +01001004 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
1005 if (ret)
Tomeu Vizoso6d4e81e2014-11-24 10:08:03 +01001006 return ret;
Thomas Renningere2f74f32009-11-19 12:31:01 +01001007 }
Dave Jones909a6942009-07-08 18:05:42 -04001008
Tomeu Vizoso6d4e81e2014-11-24 10:08:03 +01001009 return cpufreq_add_dev_symlink(policy);
Srivatsa S. Bhate18f1682013-07-30 04:24:23 +05301010}
1011
1012static void cpufreq_init_policy(struct cpufreq_policy *policy)
1013{
viresh kumar6e2c89d2014-03-04 11:43:59 +08001014 struct cpufreq_governor *gov = NULL;
Srivatsa S. Bhate18f1682013-07-30 04:24:23 +05301015 struct cpufreq_policy new_policy;
1016 int ret = 0;
1017
Viresh Kumard5b73cd2013-08-06 22:53:06 +05301018 memcpy(&new_policy, policy, sizeof(*policy));
Jason Barona27a9ab2013-12-19 22:50:50 +00001019
viresh kumar6e2c89d2014-03-04 11:43:59 +08001020 /* Update governor of new_policy to the governor used before hotplug */
Viresh Kumar42f91fa2015-01-02 12:34:26 +05301021 gov = find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu));
viresh kumar6e2c89d2014-03-04 11:43:59 +08001022 if (gov)
1023 pr_debug("Restoring governor %s for cpu %d\n",
1024 policy->governor->name, policy->cpu);
1025 else
1026 gov = CPUFREQ_DEFAULT_GOVERNOR;
1027
1028 new_policy.governor = gov;
1029
Jason Barona27a9ab2013-12-19 22:50:50 +00001030 /* Use the default policy if its valid. */
1031 if (cpufreq_driver->setpolicy)
viresh kumar6e2c89d2014-03-04 11:43:59 +08001032 cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);
Dave Jonesecf7e462009-07-08 18:48:47 -04001033
1034 /* set default policy */
Viresh Kumar037ce832013-10-02 14:13:16 +05301035 ret = cpufreq_set_policy(policy, &new_policy);
Dave Jonesecf7e462009-07-08 18:48:47 -04001036 if (ret) {
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02001037 pr_debug("setting policy failed\n");
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001038 if (cpufreq_driver->exit)
1039 cpufreq_driver->exit(policy);
Dave Jonesecf7e462009-07-08 18:48:47 -04001040 }
Dave Jones909a6942009-07-08 18:05:42 -04001041}
1042
Viresh Kumard8d3b472013-08-04 01:20:07 +02001043static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
Viresh Kumar42f921a2013-12-20 21:26:02 +05301044 unsigned int cpu, struct device *dev)
Viresh Kumarfcf80582013-01-29 14:39:08 +00001045{
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +05301046 int ret = 0;
Viresh Kumarfcf80582013-01-29 14:39:08 +00001047
Viresh Kumarbb29ae12015-02-19 17:02:06 +05301048 /* Has this CPU been taken care of already? */
1049 if (cpumask_test_cpu(cpu, policy->cpus))
1050 return 0;
1051
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +05301052 if (has_target()) {
Viresh Kumar3de9bde2013-08-06 22:53:13 +05301053 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1054 if (ret) {
1055 pr_err("%s: Failed to stop governor\n", __func__);
1056 return ret;
1057 }
1058 }
Viresh Kumarfcf80582013-01-29 14:39:08 +00001059
viresh kumarad7722d2013-10-18 19:10:15 +05301060 down_write(&policy->rwsem);
Viresh Kumarfcf80582013-01-29 14:39:08 +00001061 cpumask_set_cpu(cpu, policy->cpus);
viresh kumarad7722d2013-10-18 19:10:15 +05301062 up_write(&policy->rwsem);
Viresh Kumar2eaa3e22013-02-07 10:55:00 +05301063
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +05301064 if (has_target()) {
Stratos Karafotise5c87b72014-03-19 23:29:17 +02001065 ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
1066 if (!ret)
1067 ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1068
1069 if (ret) {
Viresh Kumar3de9bde2013-08-06 22:53:13 +05301070 pr_err("%s: Failed to start governor\n", __func__);
1071 return ret;
1072 }
Viresh Kumar820c6ca2013-04-22 00:48:03 +02001073 }
Viresh Kumarfcf80582013-01-29 14:39:08 +00001074
Viresh Kumar42f921a2013-12-20 21:26:02 +05301075 return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
Viresh Kumarfcf80582013-01-29 14:39:08 +00001076}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001077
Srivatsa S. Bhat84148092013-07-30 04:25:10 +05301078static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
1079{
1080 struct cpufreq_policy *policy;
1081 unsigned long flags;
1082
Lan Tianyu44871c92013-09-11 15:05:05 +08001083 read_lock_irqsave(&cpufreq_driver_lock, flags);
Srivatsa S. Bhat84148092013-07-30 04:25:10 +05301084
1085 policy = per_cpu(cpufreq_cpu_data_fallback, cpu);
1086
Lan Tianyu44871c92013-09-11 15:05:05 +08001087 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
Srivatsa S. Bhat84148092013-07-30 04:25:10 +05301088
Geert Uytterhoeven09712f52014-11-04 17:05:25 +01001089 if (policy)
1090 policy->governor = NULL;
viresh kumar6e2c89d2014-03-04 11:43:59 +08001091
Srivatsa S. Bhat84148092013-07-30 04:25:10 +05301092 return policy;
1093}
1094
Srivatsa S. Bhate9698cc2013-07-30 04:24:11 +05301095static struct cpufreq_policy *cpufreq_policy_alloc(void)
1096{
1097 struct cpufreq_policy *policy;
1098
1099 policy = kzalloc(sizeof(*policy), GFP_KERNEL);
1100 if (!policy)
1101 return NULL;
1102
1103 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
1104 goto err_free_policy;
1105
1106 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
1107 goto err_free_cpumask;
1108
Lukasz Majewskic88a1f82013-08-06 22:53:08 +05301109 INIT_LIST_HEAD(&policy->policy_list);
viresh kumarad7722d2013-10-18 19:10:15 +05301110 init_rwsem(&policy->rwsem);
Srivatsa S. Bhat12478cf2014-03-24 13:35:44 +05301111 spin_lock_init(&policy->transition_lock);
1112 init_waitqueue_head(&policy->transition_wait);
Viresh Kumar818c5712015-01-02 12:34:38 +05301113 init_completion(&policy->kobj_unregister);
1114 INIT_WORK(&policy->update, handle_update);
viresh kumarad7722d2013-10-18 19:10:15 +05301115
Srivatsa S. Bhate9698cc2013-07-30 04:24:11 +05301116 return policy;
1117
1118err_free_cpumask:
1119 free_cpumask_var(policy->cpus);
1120err_free_policy:
1121 kfree(policy);
1122
1123 return NULL;
1124}
1125
Viresh Kumar42f921a2013-12-20 21:26:02 +05301126static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
1127{
1128 struct kobject *kobj;
1129 struct completion *cmp;
1130
Viresh Kumarfcd7af92014-01-07 07:10:10 +05301131 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1132 CPUFREQ_REMOVE_POLICY, policy);
1133
Viresh Kumar42f921a2013-12-20 21:26:02 +05301134 down_read(&policy->rwsem);
1135 kobj = &policy->kobj;
1136 cmp = &policy->kobj_unregister;
1137 up_read(&policy->rwsem);
1138 kobject_put(kobj);
1139
1140 /*
1141 * We need to make sure that the underlying kobj is
1142 * actually not referenced anymore by anybody before we
1143 * proceed with unloading.
1144 */
1145 pr_debug("waiting for dropping of refcount\n");
1146 wait_for_completion(cmp);
1147 pr_debug("wait complete\n");
1148}
1149
Srivatsa S. Bhate9698cc2013-07-30 04:24:11 +05301150static void cpufreq_policy_free(struct cpufreq_policy *policy)
1151{
Viresh Kumar988bed02015-05-08 11:53:45 +05301152 unsigned long flags;
1153 int cpu;
1154
1155 /* Remove policy from list */
1156 write_lock_irqsave(&cpufreq_driver_lock, flags);
1157 list_del(&policy->policy_list);
1158
1159 for_each_cpu(cpu, policy->related_cpus)
1160 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1161 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1162
Srivatsa S. Bhate9698cc2013-07-30 04:24:11 +05301163 free_cpumask_var(policy->related_cpus);
1164 free_cpumask_var(policy->cpus);
1165 kfree(policy);
1166}
1167
Viresh Kumar1bfb4252014-07-17 10:48:28 +05301168static int update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu,
1169 struct device *cpu_dev)
Srivatsa S. Bhat0d66b912013-09-12 01:42:59 +05301170{
Viresh Kumar1bfb4252014-07-17 10:48:28 +05301171 int ret;
1172
Srivatsa S. Bhat99ec8992013-09-12 17:29:09 +05301173 if (WARN_ON(cpu == policy->cpu))
Viresh Kumar1bfb4252014-07-17 10:48:28 +05301174 return 0;
1175
1176 /* Move kobject to the new policy->cpu */
1177 ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
1178 if (ret) {
1179 pr_err("%s: Failed to move kobj: %d\n", __func__, ret);
1180 return ret;
1181 }
Srivatsa S. Bhatcb38ed52013-09-12 01:43:42 +05301182
viresh kumarad7722d2013-10-18 19:10:15 +05301183 down_write(&policy->rwsem);
Srivatsa S. Bhat0d66b912013-09-12 01:42:59 +05301184 policy->cpu = cpu;
viresh kumarad7722d2013-10-18 19:10:15 +05301185 up_write(&policy->rwsem);
Viresh Kumar8efd5762013-09-17 10:22:11 +05301186
Viresh Kumar1bfb4252014-07-17 10:48:28 +05301187 return 0;
Srivatsa S. Bhat0d66b912013-09-12 01:42:59 +05301188}
1189
Viresh Kumar23faf0b2015-02-19 17:02:04 +05301190/**
1191 * cpufreq_add_dev - add a CPU device
1192 *
1193 * Adds the cpufreq interface for a CPU device.
1194 *
1195 * The Oracle says: try running cpufreq registration/unregistration concurrently
1196 * with with cpu hotplugging and all hell will break loose. Tried to clean this
1197 * mess up, but more thorough testing is needed. - Mathieu
1198 */
1199static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001200{
Viresh Kumarfcf80582013-01-29 14:39:08 +00001201 unsigned int j, cpu = dev->id;
Viresh Kumar65922462013-02-07 10:56:03 +05301202 int ret = -ENOMEM;
Viresh Kumar7f0c0202015-01-02 12:34:32 +05301203 struct cpufreq_policy *policy;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001204 unsigned long flags;
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301205 bool recover_policy = cpufreq_suspended;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001206
Ashok Rajc32b6b82005-10-30 14:59:54 -08001207 if (cpu_is_offline(cpu))
1208 return 0;
1209
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02001210 pr_debug("adding CPU %u\n", cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001211
Viresh Kumar6eed9402013-08-06 22:53:11 +05301212 if (!down_read_trylock(&cpufreq_rwsem))
1213 return 0;
1214
Viresh Kumarbb29ae12015-02-19 17:02:06 +05301215 /* Check if this CPU already has a policy to manage it */
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00001216 read_lock_irqsave(&cpufreq_driver_lock, flags);
Viresh Kumarf9637352015-05-12 12:20:11 +05301217 for_each_active_policy(policy) {
Viresh Kumar7f0c0202015-01-02 12:34:32 +05301218 if (cpumask_test_cpu(cpu, policy->related_cpus)) {
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00001219 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
Viresh Kumar7f0c0202015-01-02 12:34:32 +05301220 ret = cpufreq_add_policy_cpu(policy, cpu, dev);
Viresh Kumar6eed9402013-08-06 22:53:11 +05301221 up_read(&cpufreq_rwsem);
1222 return ret;
Viresh Kumar2eaa3e22013-02-07 10:55:00 +05301223 }
Viresh Kumarfcf80582013-01-29 14:39:08 +00001224 }
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00001225 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001226
Rafael J. Wysocki72368d12013-12-27 01:07:11 +01001227 /*
1228 * Restore the saved policy when doing light-weight init and fall back
1229 * to the full init if that fails.
1230 */
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301231 policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
Rafael J. Wysocki72368d12013-12-27 01:07:11 +01001232 if (!policy) {
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301233 recover_policy = false;
Srivatsa S. Bhat84148092013-07-30 04:25:10 +05301234 policy = cpufreq_policy_alloc();
Rafael J. Wysocki72368d12013-12-27 01:07:11 +01001235 if (!policy)
1236 goto nomem_out;
1237 }
Srivatsa S. Bhat0d66b912013-09-12 01:42:59 +05301238
1239 /*
1240 * In the resume path, since we restore a saved policy, the assignment
1241 * to policy->cpu is like an update of the existing policy, rather than
1242 * the creation of a brand new one. So we need to perform this update
1243 * by invoking update_policy_cpu().
1244 */
Viresh Kumar1bfb4252014-07-17 10:48:28 +05301245 if (recover_policy && cpu != policy->cpu)
1246 WARN_ON(update_policy_cpu(policy, cpu, dev));
1247 else
Srivatsa S. Bhat0d66b912013-09-12 01:42:59 +05301248 policy->cpu = cpu;
1249
Rusty Russell835481d2009-01-04 05:18:06 -08001250 cpumask_copy(policy->cpus, cpumask_of(cpu));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001251
Linus Torvalds1da177e2005-04-16 15:20:36 -07001252 /* call driver. From then on the cpufreq must be able
1253 * to accept all calls to ->verify and ->setpolicy for this CPU
1254 */
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001255 ret = cpufreq_driver->init(policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001256 if (ret) {
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02001257 pr_debug("initialization failed\n");
Viresh Kumar2eaa3e22013-02-07 10:55:00 +05301258 goto err_set_policy_cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001259 }
Viresh Kumar643ae6e2013-01-12 05:14:38 +00001260
Tomeu Vizoso6d4e81e2014-11-24 10:08:03 +01001261 down_write(&policy->rwsem);
1262
Viresh Kumar5a7e56a2014-03-04 11:44:00 +08001263	/* related cpus should at least have policy->cpus */
1264 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
1265
1266 /*
1267	 * affected cpus must always be the ones which are online. We aren't
1268 * managing offline cpus here.
1269 */
1270 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1271
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301272 if (!recover_policy) {
Viresh Kumar5a7e56a2014-03-04 11:44:00 +08001273 policy->user_policy.min = policy->min;
1274 policy->user_policy.max = policy->max;
Tomeu Vizoso6d4e81e2014-11-24 10:08:03 +01001275
1276 /* prepare interface data */
1277 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
1278 &dev->kobj, "cpufreq");
1279 if (ret) {
1280 pr_err("%s: failed to init policy->kobj: %d\n",
1281 __func__, ret);
1282 goto err_init_policy_kobj;
1283 }
Viresh Kumar5a7e56a2014-03-04 11:44:00 +08001284
Viresh Kumar988bed02015-05-08 11:53:45 +05301285 write_lock_irqsave(&cpufreq_driver_lock, flags);
1286 for_each_cpu(j, policy->related_cpus)
1287 per_cpu(cpufreq_cpu_data, j) = policy;
1288 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1289 }
Viresh Kumar652ed952014-01-09 20:38:43 +05301290
Rafael J. Wysocki2ed99e32014-03-12 21:49:33 +01001291 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
Viresh Kumarda60ce92013-10-03 20:28:30 +05301292 policy->cur = cpufreq_driver->get(policy->cpu);
1293 if (!policy->cur) {
1294 pr_err("%s: ->get() failed\n", __func__);
1295 goto err_get_freq;
1296 }
1297 }
1298
Viresh Kumard3916692013-12-03 11:20:46 +05301299 /*
1300 * Sometimes boot loaders set CPU frequency to a value outside of
1301	 * the frequency table known to the cpufreq core. In such cases the CPU
1302	 * might be unstable if it has to run at that frequency for a long time,
1303	 * and so it is better to set it to a frequency which is specified in the
1304	 * freq-table. Running at an unlisted frequency also makes cpufreq stats
1305	 * inconsistent, as cpufreq-stats would fail to register because the
1306	 * current frequency of the CPU isn't found in the freq-table.
1307 *
1308	 * Because we don't want this change to affect the boot process badly, we go
1309 * for the next freq which is >= policy->cur ('cur' must be set by now,
1310 * otherwise we will end up setting freq to lowest of the table as 'cur'
1311 * is initialized to zero).
1312 *
1313 * We are passing target-freq as "policy->cur - 1" otherwise
1314 * __cpufreq_driver_target() would simply fail, as policy->cur will be
1315 * equal to target-freq.
1316 */
1317 if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
1318 && has_target()) {
1319 /* Are we running at unknown frequency ? */
1320 ret = cpufreq_frequency_table_get_index(policy, policy->cur);
1321 if (ret == -EINVAL) {
1322 /* Warn user and fix it */
1323 pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
1324 __func__, policy->cpu, policy->cur);
1325 ret = __cpufreq_driver_target(policy, policy->cur - 1,
1326 CPUFREQ_RELATION_L);
1327
1328 /*
1329			 * Reaching here a few seconds after boot does not mean
1330			 * that the system will remain stable at the "unknown"
1331			 * frequency for a longer duration. Hence, a BUG_ON().
1332 */
1333 BUG_ON(ret);
1334 pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
1335 __func__, policy->cpu, policy->cur);
1336 }
1337 }
1338
Thomas Renningera1531ac2008-07-29 22:32:58 -07001339 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1340 CPUFREQ_START, policy);
1341
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301342 if (!recover_policy) {
Viresh Kumar308b60e2013-07-31 14:35:14 +02001343 ret = cpufreq_add_dev_interface(policy, dev);
Srivatsa S. Bhata82fab22013-07-30 04:24:49 +05301344 if (ret)
1345 goto err_out_unregister;
Viresh Kumarfcd7af92014-01-07 07:10:10 +05301346 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1347 CPUFREQ_CREATE_POLICY, policy);
Dave Jones8ff69732006-03-05 03:37:23 -05001348
Viresh Kumar988bed02015-05-08 11:53:45 +05301349 write_lock_irqsave(&cpufreq_driver_lock, flags);
1350 list_add(&policy->policy_list, &cpufreq_policy_list);
1351 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1352 }
Viresh Kumar9515f4d2013-08-20 12:08:23 +05301353
Srivatsa S. Bhate18f1682013-07-30 04:24:23 +05301354 cpufreq_init_policy(policy);
1355
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301356 if (!recover_policy) {
Viresh Kumar08fd8c1c2013-12-24 07:11:01 +05301357 policy->user_policy.policy = policy->policy;
1358 policy->user_policy.governor = policy->governor;
1359 }
Viresh Kumar4e97b632014-03-04 11:44:01 +08001360 up_write(&policy->rwsem);
Viresh Kumar08fd8c1c2013-12-24 07:11:01 +05301361
Greg Kroah-Hartman038c5b32007-12-17 15:54:39 -04001362 kobject_uevent(&policy->kobj, KOBJ_ADD);
Viresh Kumar7c45cf32014-11-27 06:07:51 +05301363
Viresh Kumar6eed9402013-08-06 22:53:11 +05301364 up_read(&cpufreq_rwsem);
1365
Viresh Kumar7c45cf32014-11-27 06:07:51 +05301366 /* Callback for handling stuff after policy is ready */
1367 if (cpufreq_driver->ready)
1368 cpufreq_driver->ready(policy);
1369
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02001370 pr_debug("initialization complete\n");
Dave Jones87c32272006-03-29 01:48:37 -05001371
Linus Torvalds1da177e2005-04-16 15:20:36 -07001372 return 0;
1373
Linus Torvalds1da177e2005-04-16 15:20:36 -07001374err_out_unregister:
Viresh Kumar652ed952014-01-09 20:38:43 +05301375err_get_freq:
Tomeu Vizoso6d4e81e2014-11-24 10:08:03 +01001376 if (!recover_policy) {
1377 kobject_put(&policy->kobj);
1378 wait_for_completion(&policy->kobj_unregister);
1379 }
1380err_init_policy_kobj:
Prarit Bhargava7106e022014-09-10 10:12:08 -04001381 up_write(&policy->rwsem);
1382
Viresh Kumarda60ce92013-10-03 20:28:30 +05301383 if (cpufreq_driver->exit)
1384 cpufreq_driver->exit(policy);
Viresh Kumar2eaa3e22013-02-07 10:55:00 +05301385err_set_policy_cpu:
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301386 if (recover_policy) {
Rafael J. Wysocki72368d12013-12-27 01:07:11 +01001387 /* Do not leave stale fallback data behind. */
1388 per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
Viresh Kumar42f921a2013-12-20 21:26:02 +05301389 cpufreq_policy_put_kobj(policy);
Rafael J. Wysocki72368d12013-12-27 01:07:11 +01001390 }
Srivatsa S. Bhate9698cc2013-07-30 04:24:11 +05301391 cpufreq_policy_free(policy);
Viresh Kumar42f921a2013-12-20 21:26:02 +05301392
Linus Torvalds1da177e2005-04-16 15:20:36 -07001393nomem_out:
Viresh Kumar6eed9402013-08-06 22:53:11 +05301394 up_read(&cpufreq_rwsem);
1395
Linus Torvalds1da177e2005-04-16 15:20:36 -07001396 return ret;
1397}
1398
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301399static int __cpufreq_remove_dev_prepare(struct device *dev,
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301400 struct subsys_interface *sif)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001401{
Srivatsa S. Bhatf9ba6802013-07-30 04:24:36 +05301402 unsigned int cpu = dev->id, cpus;
Viresh Kumar1bfb4252014-07-17 10:48:28 +05301403 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001404 unsigned long flags;
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301405 struct cpufreq_policy *policy;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001406
Viresh Kumarb8eed8a2013-01-14 13:23:03 +00001407 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001408
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00001409 write_lock_irqsave(&cpufreq_driver_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001410
Viresh Kumar988bed02015-05-08 11:53:45 +05301411 policy = cpufreq_cpu_get_raw(cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001412
Srivatsa S. Bhat84148092013-07-30 04:25:10 +05301413 /* Save the policy somewhere when doing a light-weight tear-down */
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301414 if (cpufreq_suspended)
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301415 per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;
Srivatsa S. Bhat84148092013-07-30 04:25:10 +05301416
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00001417 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001418
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301419 if (!policy) {
Viresh Kumarb8eed8a2013-01-14 13:23:03 +00001420 pr_debug("%s: No cpu_data found\n", __func__);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001421 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001422 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001423
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +05301424 if (has_target()) {
Viresh Kumar3de9bde2013-08-06 22:53:13 +05301425 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1426 if (ret) {
1427 pr_err("%s: Failed to stop governor\n", __func__);
1428 return ret;
1429 }
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001430
Dirk Brandewiefa69e332013-02-06 09:02:11 -08001431 strncpy(per_cpu(cpufreq_cpu_governor, cpu),
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301432 policy->governor->name, CPUFREQ_NAME_LEN);
Viresh Kumardb5f2992015-01-02 12:34:25 +05301433 }
Jacob Shin27ecddc2011-04-27 13:32:11 -05001434
viresh kumarad7722d2013-10-18 19:10:15 +05301435 down_read(&policy->rwsem);
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301436 cpus = cpumask_weight(policy->cpus);
viresh kumarad7722d2013-10-18 19:10:15 +05301437 up_read(&policy->rwsem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001438
Srivatsa S. Bhat61173f22013-09-12 01:43:25 +05301439 if (cpu != policy->cpu) {
viresh kumar6964d912014-02-17 14:52:11 +05301440 sysfs_remove_link(&dev->kobj, "cpufreq");
Viresh Kumar73bf0fc2013-02-05 22:21:14 +01001441 } else if (cpus > 1) {
Viresh Kumar1bfb4252014-07-17 10:48:28 +05301442 /* Nominate new CPU */
1443 int new_cpu = cpumask_any_but(policy->cpus, cpu);
1444 struct device *cpu_dev = get_cpu_device(new_cpu);
Srivatsa S. Bhata82fab22013-07-30 04:24:49 +05301445
Viresh Kumar1bfb4252014-07-17 10:48:28 +05301446 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
1447 ret = update_policy_cpu(policy, new_cpu, cpu_dev);
1448 if (ret) {
1449 if (sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
1450 "cpufreq"))
1451 pr_err("%s: Failed to restore kobj link to cpu:%d\n",
1452 __func__, cpu_dev->id);
1453 return ret;
Viresh Kumarb8eed8a2013-01-14 13:23:03 +00001454 }
Viresh Kumar1bfb4252014-07-17 10:48:28 +05301455
1456 if (!cpufreq_suspended)
1457 pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
1458 __func__, new_cpu, cpu);
Preeti U Murthy789ca242014-09-29 15:47:12 +02001459 } else if (cpufreq_driver->stop_cpu) {
Dirk Brandewie367dc4a2014-03-19 08:45:53 -07001460 cpufreq_driver->stop_cpu(policy);
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001461 }
Venki Pallipadiec282972007-03-26 12:03:19 -07001462
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301463 return 0;
1464}
1465
1466static int __cpufreq_remove_dev_finish(struct device *dev,
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301467 struct subsys_interface *sif)
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301468{
Viresh Kumar988bed02015-05-08 11:53:45 +05301469 unsigned int cpu = dev->id;
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301470 int ret;
Viresh Kumar988bed02015-05-08 11:53:45 +05301471 struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301472
1473 if (!policy) {
1474 pr_debug("%s: No cpu_data found\n", __func__);
1475 return -EINVAL;
1476 }
1477
viresh kumarad7722d2013-10-18 19:10:15 +05301478 down_write(&policy->rwsem);
Viresh Kumar303ae722015-02-19 17:02:07 +05301479 cpumask_clear_cpu(cpu, policy->cpus);
viresh kumarad7722d2013-10-18 19:10:15 +05301480 up_write(&policy->rwsem);
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301481
Viresh Kumarb8eed8a2013-01-14 13:23:03 +00001482 /* If cpu is last user of policy, free policy */
Viresh Kumar988bed02015-05-08 11:53:45 +05301483 if (policy_is_inactive(policy)) {
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +05301484 if (has_target()) {
Viresh Kumar3de9bde2013-08-06 22:53:13 +05301485 ret = __cpufreq_governor(policy,
1486 CPUFREQ_GOV_POLICY_EXIT);
1487 if (ret) {
1488 pr_err("%s: Failed to exit governor\n",
Joe Perchese837f9b2014-03-11 10:03:00 -07001489 __func__);
Viresh Kumar3de9bde2013-08-06 22:53:13 +05301490 return ret;
1491 }
Viresh Kumaredab2fb2013-08-20 12:08:22 +05301492 }
Rafael J. Wysocki2a998592013-07-30 00:32:00 +02001493
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301494 if (!cpufreq_suspended)
Viresh Kumar42f921a2013-12-20 21:26:02 +05301495 cpufreq_policy_put_kobj(policy);
Srivatsa S. Bhat84148092013-07-30 04:25:10 +05301496
1497 /*
1498 * Perform the ->exit() even during light-weight tear-down,
1499 * since this is a core component, and is essential for the
1500 * subsequent light-weight ->init() to succeed.
Viresh Kumarb8eed8a2013-01-14 13:23:03 +00001501 */
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001502 if (cpufreq_driver->exit)
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301503 cpufreq_driver->exit(policy);
Viresh Kumarb8eed8a2013-01-14 13:23:03 +00001504
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301505 if (!cpufreq_suspended)
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301506 cpufreq_policy_free(policy);
Stratos Karafotise5c87b72014-03-19 23:29:17 +02001507 } else if (has_target()) {
1508 ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
1509 if (!ret)
1510 ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1511
1512 if (ret) {
1513 pr_err("%s: Failed to start governor\n", __func__);
1514 return ret;
Rafael J. Wysocki2a998592013-07-30 00:32:00 +02001515 }
Viresh Kumarb8eed8a2013-01-14 13:23:03 +00001516 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001517
Linus Torvalds1da177e2005-04-16 15:20:36 -07001518 return 0;
1519}
1520
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301521/**
Viresh Kumar27a862e2013-10-02 14:13:14 +05301522 * cpufreq_remove_dev - remove a CPU device
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301523 *
1524 * Removes the cpufreq interface for a CPU device.
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301525 */
Kay Sievers8a25a2f2011-12-21 14:29:42 -08001526static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001527{
Kay Sievers8a25a2f2011-12-21 14:29:42 -08001528 unsigned int cpu = dev->id;
Viresh Kumar27a862e2013-10-02 14:13:14 +05301529 int ret;
Venki Pallipadiec282972007-03-26 12:03:19 -07001530
1531 if (cpu_is_offline(cpu))
1532 return 0;
1533
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301534 ret = __cpufreq_remove_dev_prepare(dev, sif);
Viresh Kumar27a862e2013-10-02 14:13:14 +05301535
1536 if (!ret)
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301537 ret = __cpufreq_remove_dev_finish(dev, sif);
Viresh Kumar27a862e2013-10-02 14:13:14 +05301538
1539 return ret;
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001540}
1541
David Howells65f27f32006-11-22 14:55:48 +00001542static void handle_update(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001543{
David Howells65f27f32006-11-22 14:55:48 +00001544 struct cpufreq_policy *policy =
1545 container_of(work, struct cpufreq_policy, update);
1546 unsigned int cpu = policy->cpu;
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02001547 pr_debug("handle_update for cpu %u called\n", cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001548 cpufreq_update_policy(cpu);
1549}
1550
1551/**
Viresh Kumarbb176f72013-06-19 14:19:33 +05301552 * cpufreq_out_of_sync - If the actual and saved CPU frequencies differ, we're
1553 * in deep trouble.
Viresh Kumara1e1dc42015-01-02 12:34:28 +05301554 * @policy: policy managing CPUs
Linus Torvalds1da177e2005-04-16 15:20:36 -07001555 * @new_freq: CPU frequency the CPU actually runs at
1556 *
Dave Jones29464f22009-01-18 01:37:11 -05001557 * We adjust to current frequency first, and need to clean up later.
1558 * So either call cpufreq_update_policy() or schedule handle_update().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001559 */
Viresh Kumara1e1dc42015-01-02 12:34:28 +05301560static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301561 unsigned int new_freq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001562{
1563 struct cpufreq_freqs freqs;
Viresh Kumarb43a7ff2013-03-24 11:56:43 +05301564
Joe Perchese837f9b2014-03-11 10:03:00 -07001565 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
Viresh Kumara1e1dc42015-01-02 12:34:28 +05301566 policy->cur, new_freq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001567
Viresh Kumara1e1dc42015-01-02 12:34:28 +05301568 freqs.old = policy->cur;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001569 freqs.new = new_freq;
Viresh Kumarb43a7ff2013-03-24 11:56:43 +05301570
Viresh Kumar8fec0512014-03-24 13:35:45 +05301571 cpufreq_freq_transition_begin(policy, &freqs);
1572 cpufreq_freq_transition_end(policy, &freqs, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001573}
1574
Dave Jones32ee8c32006-02-28 00:43:23 -05001575/**
Dhaval Giani4ab70df2006-12-13 14:49:15 +05301576 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
Venkatesh Pallipadi95235ca2005-12-02 10:43:20 -08001577 * @cpu: CPU number
1578 *
1579 * This is the last known freq, without actually getting it from the driver.
1580 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1581 */
1582unsigned int cpufreq_quick_get(unsigned int cpu)
1583{
Dirk Brandewie9e21ba82013-02-06 09:02:08 -08001584 struct cpufreq_policy *policy;
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301585 unsigned int ret_freq = 0;
Venkatesh Pallipadi95235ca2005-12-02 10:43:20 -08001586
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001587 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1588 return cpufreq_driver->get(cpu);
Dirk Brandewie9e21ba82013-02-06 09:02:08 -08001589
1590 policy = cpufreq_cpu_get(cpu);
Venkatesh Pallipadi95235ca2005-12-02 10:43:20 -08001591 if (policy) {
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301592 ret_freq = policy->cur;
Venkatesh Pallipadi95235ca2005-12-02 10:43:20 -08001593 cpufreq_cpu_put(policy);
1594 }
1595
Dave Jones4d34a672008-02-07 16:33:49 -05001596 return ret_freq;
Venkatesh Pallipadi95235ca2005-12-02 10:43:20 -08001597}
1598EXPORT_SYMBOL(cpufreq_quick_get);
1599
Jesse Barnes3d737102011-06-28 10:59:12 -07001600/**
1601 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1602 * @cpu: CPU number
1603 *
1604 * Just return the max possible frequency for a given CPU.
1605 */
1606unsigned int cpufreq_quick_get_max(unsigned int cpu)
1607{
1608 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1609 unsigned int ret_freq = 0;
1610
1611 if (policy) {
1612 ret_freq = policy->max;
1613 cpufreq_cpu_put(policy);
1614 }
1615
1616 return ret_freq;
1617}
1618EXPORT_SYMBOL(cpufreq_quick_get_max);
1619
Viresh Kumard92d50a2015-01-02 12:34:29 +05301620static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001621{
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301622 unsigned int ret_freq = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001623
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001624 if (!cpufreq_driver->get)
Dave Jones4d34a672008-02-07 16:33:49 -05001625 return ret_freq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001626
Viresh Kumard92d50a2015-01-02 12:34:29 +05301627 ret_freq = cpufreq_driver->get(policy->cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001628
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301629 if (ret_freq && policy->cur &&
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001630 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301631 /* verify no discrepancy between actual and
1632 saved value exists */
1633 if (unlikely(ret_freq != policy->cur)) {
Viresh Kumara1e1dc42015-01-02 12:34:28 +05301634 cpufreq_out_of_sync(policy, ret_freq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001635 schedule_work(&policy->update);
1636 }
1637 }
1638
Dave Jones4d34a672008-02-07 16:33:49 -05001639 return ret_freq;
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001640}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001641
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001642/**
1643 * cpufreq_get - get the current CPU frequency (in kHz)
1644 * @cpu: CPU number
1645 *
1646 * Get the current frequency of the CPU.
1647 */
1648unsigned int cpufreq_get(unsigned int cpu)
1649{
Aaron Plattner999976e2014-03-04 12:42:15 -08001650 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001651 unsigned int ret_freq = 0;
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001652
Aaron Plattner999976e2014-03-04 12:42:15 -08001653 if (policy) {
1654 down_read(&policy->rwsem);
Viresh Kumard92d50a2015-01-02 12:34:29 +05301655 ret_freq = __cpufreq_get(policy);
Aaron Plattner999976e2014-03-04 12:42:15 -08001656 up_read(&policy->rwsem);
Viresh Kumar26ca8692013-09-20 22:37:31 +05301657
Aaron Plattner999976e2014-03-04 12:42:15 -08001658 cpufreq_cpu_put(policy);
1659 }
Viresh Kumar6eed9402013-08-06 22:53:11 +05301660
Dave Jones4d34a672008-02-07 16:33:49 -05001661 return ret_freq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001662}
1663EXPORT_SYMBOL(cpufreq_get);
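
/*
 * Illustrative sketch (not part of the original file): how a caller might use
 * the two getters above. cpufreq_quick_get() returns the cached policy->cur
 * without touching the hardware, while cpufreq_get() queries the driver and
 * may resynchronize policy->cur. The caller code below is hypothetical.
 *
 *	unsigned int cached_khz = cpufreq_quick_get(0);
 *	unsigned int hw_khz = cpufreq_get(0);
 */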
1664
Kay Sievers8a25a2f2011-12-21 14:29:42 -08001665static struct subsys_interface cpufreq_interface = {
1666 .name = "cpufreq",
1667 .subsys = &cpu_subsys,
1668 .add_dev = cpufreq_add_dev,
1669 .remove_dev = cpufreq_remove_dev,
Rafael J. Wysockie00e56d2011-03-23 22:16:32 +01001670};
1671
Viresh Kumare28867e2014-03-04 11:00:27 +08001672/*
1673 * In case the platform wants some specific frequency to be configured
1674 * during suspend.
Benjamin Herrenschmidt42d4dc32005-04-29 07:40:12 -07001675 */
Viresh Kumare28867e2014-03-04 11:00:27 +08001676int cpufreq_generic_suspend(struct cpufreq_policy *policy)
Benjamin Herrenschmidt42d4dc32005-04-29 07:40:12 -07001677{
Viresh Kumare28867e2014-03-04 11:00:27 +08001678 int ret;
Dave Jones4bc5d342009-08-04 14:03:25 -04001679
Viresh Kumare28867e2014-03-04 11:00:27 +08001680 if (!policy->suspend_freq) {
1681 pr_err("%s: suspend_freq can't be zero\n", __func__);
1682 return -EINVAL;
Benjamin Herrenschmidt42d4dc32005-04-29 07:40:12 -07001683 }
1684
Viresh Kumare28867e2014-03-04 11:00:27 +08001685 pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1686 policy->suspend_freq);
1687
1688 ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1689 CPUFREQ_RELATION_H);
1690 if (ret)
1691 pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1692 __func__, policy->suspend_freq, ret);
1693
Dave Jonesc9060492008-02-07 16:32:18 -05001694 return ret;
Benjamin Herrenschmidt42d4dc32005-04-29 07:40:12 -07001695}
Viresh Kumare28867e2014-03-04 11:00:27 +08001696EXPORT_SYMBOL(cpufreq_generic_suspend);
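
/*
 * Illustrative sketch (not part of the original file): a driver that wants a
 * fixed frequency across suspend can set policy->suspend_freq from its
 * ->init() callback and point its ->suspend hook at the helper above. The
 * names my_cpufreq_init, my_cpufreq_driver, my_freq_table and the 800000 kHz
 * value are hypothetical.
 *
 *	static int my_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		policy->suspend_freq = 800000;
 *		return cpufreq_table_validate_and_show(policy, my_freq_table);
 *	}
 *
 *	static struct cpufreq_driver my_cpufreq_driver = {
 *		.init		= my_cpufreq_init,
 *		.suspend	= cpufreq_generic_suspend,
 *	};
 */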
Benjamin Herrenschmidt42d4dc32005-04-29 07:40:12 -07001697
1698/**
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001699 * cpufreq_suspend() - Suspend CPUFreq governors
Linus Torvalds1da177e2005-04-16 15:20:36 -07001700 *
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001701 * Called during system wide Suspend/Hibernate cycles for suspending governors
1702 * as some platforms can't change frequency after this point in the suspend cycle,
1703 * because some of the devices (like: i2c, regulators, etc) used for changing
1704 * the frequency are suspended quickly after this point.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001705 */
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001706void cpufreq_suspend(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001707{
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301708 struct cpufreq_policy *policy;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001709
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001710 if (!cpufreq_driver)
Rafael J. Wysockie00e56d2011-03-23 22:16:32 +01001711 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001712
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001713 if (!has_target())
Viresh Kumarb1b12bab2014-09-30 09:33:17 +05301714 goto suspend;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001715
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001716 pr_debug("%s: Suspending Governors\n", __func__);
1717
Viresh Kumarf9637352015-05-12 12:20:11 +05301718 for_each_active_policy(policy) {
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001719 if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
1720 pr_err("%s: Failed to stop governor for policy: %p\n",
1721 __func__, policy);
1722 else if (cpufreq_driver->suspend
1723 && cpufreq_driver->suspend(policy))
1724 pr_err("%s: Failed to suspend driver: %p\n", __func__,
1725 policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001726 }
Viresh Kumarb1b12bab2014-09-30 09:33:17 +05301727
1728suspend:
1729 cpufreq_suspended = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001730}
1731
Linus Torvalds1da177e2005-04-16 15:20:36 -07001732/**
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001733 * cpufreq_resume() - Resume CPUFreq governors
Linus Torvalds1da177e2005-04-16 15:20:36 -07001734 *
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001735 * Called during system wide Suspend/Hibernate cycle for resuming governors that
1736 * are suspended with cpufreq_suspend().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001737 */
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001738void cpufreq_resume(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001739{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001740 struct cpufreq_policy *policy;
1741
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001742 if (!cpufreq_driver)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001743 return;
1744
Lan Tianyu8e304442014-09-18 15:03:07 +08001745 cpufreq_suspended = false;
1746
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001747 if (!has_target())
1748 return;
1749
1750 pr_debug("%s: Resuming Governors\n", __func__);
1751
Viresh Kumarf9637352015-05-12 12:20:11 +05301752 for_each_active_policy(policy) {
Viresh Kumar0c5aa402014-03-24 12:30:29 +05301753 if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
1754 pr_err("%s: Failed to resume driver: %p\n", __func__,
1755 policy);
1756 else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001757 || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
1758 pr_err("%s: Failed to start governor for policy: %p\n",
1759 __func__, policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001760 }
Viresh Kumarc75de0a2015-04-02 10:21:33 +05301761
1762 /*
1763	 * Schedule a call to cpufreq_update_policy() for the first-online CPU, as that
1764 * wouldn't be hotplugged-out on suspend. It will verify that the
1765 * current freq is in sync with what we believe it to be.
1766 */
1767 policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask));
1768 if (WARN_ON(!policy))
1769 return;
1770
1771 schedule_work(&policy->update);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001772}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001773
Borislav Petkov9d950462013-01-20 10:24:28 +00001774/**
1775 * cpufreq_get_current_driver - return current driver's name
1776 *
1777 * Return the name string of the currently loaded cpufreq driver
1778 * or NULL, if none.
1779 */
1780const char *cpufreq_get_current_driver(void)
1781{
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001782 if (cpufreq_driver)
1783 return cpufreq_driver->name;
1784
1785 return NULL;
Borislav Petkov9d950462013-01-20 10:24:28 +00001786}
1787EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001788
Thomas Petazzoni51315cd2014-10-19 11:30:27 +02001789/**
1790 * cpufreq_get_driver_data - return current driver data
1791 *
1792 * Return the private data of the currently loaded cpufreq
1793 * driver, or NULL if no cpufreq driver is loaded.
1794 */
1795void *cpufreq_get_driver_data(void)
1796{
1797 if (cpufreq_driver)
1798 return cpufreq_driver->driver_data;
1799
1800 return NULL;
1801}
1802EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
1803
Linus Torvalds1da177e2005-04-16 15:20:36 -07001804/*********************************************************************
1805 * NOTIFIER LISTS INTERFACE *
1806 *********************************************************************/
1807
1808/**
1809 * cpufreq_register_notifier - register a driver with cpufreq
1810 * @nb: notifier function to register
1811 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1812 *
Dave Jones32ee8c32006-02-28 00:43:23 -05001813 * Add a driver to one of two lists: either a list of drivers that
Linus Torvalds1da177e2005-04-16 15:20:36 -07001814 * are notified about clock rate changes (once before and once after
1815 * the transition), or a list of drivers that are notified about
1816 * changes in cpufreq policy.
1817 *
1818 * This function may sleep, and has the same return conditions as
Alan Sterne041c682006-03-27 01:16:30 -08001819 * blocking_notifier_chain_register.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001820 */
1821int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1822{
1823 int ret;
1824
Dirk Brandewied5aaffa2013-01-17 16:22:21 +00001825 if (cpufreq_disabled())
1826 return -EINVAL;
1827
Cesar Eduardo Barros74212ca2008-02-16 08:41:24 -02001828 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1829
Linus Torvalds1da177e2005-04-16 15:20:36 -07001830 switch (list) {
1831 case CPUFREQ_TRANSITION_NOTIFIER:
Alan Sternb4dfdbb2006-10-04 02:17:06 -07001832 ret = srcu_notifier_chain_register(
Alan Sterne041c682006-03-27 01:16:30 -08001833 &cpufreq_transition_notifier_list, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001834 break;
1835 case CPUFREQ_POLICY_NOTIFIER:
Alan Sterne041c682006-03-27 01:16:30 -08001836 ret = blocking_notifier_chain_register(
1837 &cpufreq_policy_notifier_list, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001838 break;
1839 default:
1840 ret = -EINVAL;
1841 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001842
1843 return ret;
1844}
1845EXPORT_SYMBOL(cpufreq_register_notifier);
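
/*
 * Illustrative sketch (not part of the original file): registering a
 * transition notifier. The callback and notifier_block names are
 * hypothetical; CPUFREQ_PRECHANGE/CPUFREQ_POSTCHANGE are delivered around
 * each frequency change with a struct cpufreq_freqs as data.
 *
 *	static int my_transition_cb(struct notifier_block *nb,
 *				    unsigned long val, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (val == CPUFREQ_POSTCHANGE)
 *			pr_debug("cpu%u: %u -> %u kHz\n", freqs->cpu,
 *				 freqs->old, freqs->new);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_transition_cb,
 *	};
 *
 *	cpufreq_register_notifier(&my_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */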
1846
Linus Torvalds1da177e2005-04-16 15:20:36 -07001847/**
1848 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1849 * @nb: notifier block to be unregistered
Viresh Kumarbb176f72013-06-19 14:19:33 +05301850 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
Linus Torvalds1da177e2005-04-16 15:20:36 -07001851 *
1852 * Remove a driver from the CPU frequency notifier list.
1853 *
1854 * This function may sleep, and has the same return conditions as
Alan Sterne041c682006-03-27 01:16:30 -08001855 * blocking_notifier_chain_unregister.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001856 */
1857int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1858{
1859 int ret;
1860
Dirk Brandewied5aaffa2013-01-17 16:22:21 +00001861 if (cpufreq_disabled())
1862 return -EINVAL;
1863
Linus Torvalds1da177e2005-04-16 15:20:36 -07001864 switch (list) {
1865 case CPUFREQ_TRANSITION_NOTIFIER:
Alan Sternb4dfdbb2006-10-04 02:17:06 -07001866 ret = srcu_notifier_chain_unregister(
Alan Sterne041c682006-03-27 01:16:30 -08001867 &cpufreq_transition_notifier_list, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001868 break;
1869 case CPUFREQ_POLICY_NOTIFIER:
Alan Sterne041c682006-03-27 01:16:30 -08001870 ret = blocking_notifier_chain_unregister(
1871 &cpufreq_policy_notifier_list, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001872 break;
1873 default:
1874 ret = -EINVAL;
1875 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001876
1877 return ret;
1878}
1879EXPORT_SYMBOL(cpufreq_unregister_notifier);
1880
1881
1882/*********************************************************************
1883 * GOVERNORS *
1884 *********************************************************************/
1885
Viresh Kumar1c03a2d2014-06-02 22:49:28 +05301886/* Must set freqs->new to intermediate frequency */
1887static int __target_intermediate(struct cpufreq_policy *policy,
1888 struct cpufreq_freqs *freqs, int index)
1889{
1890 int ret;
1891
1892 freqs->new = cpufreq_driver->get_intermediate(policy, index);
1893
1894 /* We don't need to switch to intermediate freq */
1895 if (!freqs->new)
1896 return 0;
1897
1898 pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
1899 __func__, policy->cpu, freqs->old, freqs->new);
1900
1901 cpufreq_freq_transition_begin(policy, freqs);
1902 ret = cpufreq_driver->target_intermediate(policy, index);
1903 cpufreq_freq_transition_end(policy, freqs, ret);
1904
1905 if (ret)
1906 pr_err("%s: Failed to change to intermediate frequency: %d\n",
1907 __func__, ret);
1908
1909 return ret;
1910}
1911
Viresh Kumar8d657752014-05-21 14:29:29 +05301912static int __target_index(struct cpufreq_policy *policy,
1913 struct cpufreq_frequency_table *freq_table, int index)
1914{
Viresh Kumar1c03a2d2014-06-02 22:49:28 +05301915 struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
1916 unsigned int intermediate_freq = 0;
Viresh Kumar8d657752014-05-21 14:29:29 +05301917 int retval = -EINVAL;
1918 bool notify;
1919
1920 notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
Viresh Kumar8d657752014-05-21 14:29:29 +05301921 if (notify) {
Viresh Kumar1c03a2d2014-06-02 22:49:28 +05301922 /* Handle switching to intermediate frequency */
1923 if (cpufreq_driver->get_intermediate) {
1924 retval = __target_intermediate(policy, &freqs, index);
1925 if (retval)
1926 return retval;
Viresh Kumar8d657752014-05-21 14:29:29 +05301927
Viresh Kumar1c03a2d2014-06-02 22:49:28 +05301928 intermediate_freq = freqs.new;
1929 /* Set old freq to intermediate */
1930 if (intermediate_freq)
1931 freqs.old = freqs.new;
1932 }
1933
1934 freqs.new = freq_table[index].frequency;
Viresh Kumar8d657752014-05-21 14:29:29 +05301935 pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
1936 __func__, policy->cpu, freqs.old, freqs.new);
1937
1938 cpufreq_freq_transition_begin(policy, &freqs);
1939 }
1940
1941 retval = cpufreq_driver->target_index(policy, index);
1942 if (retval)
1943 pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
1944 retval);
1945
Viresh Kumar1c03a2d2014-06-02 22:49:28 +05301946 if (notify) {
Viresh Kumar8d657752014-05-21 14:29:29 +05301947 cpufreq_freq_transition_end(policy, &freqs, retval);
1948
Viresh Kumar1c03a2d2014-06-02 22:49:28 +05301949 /*
1950 * Failed after setting to intermediate freq? Driver should have
1951 * reverted back to initial frequency and so should we. Check
1952 * here for intermediate_freq instead of get_intermediate, in
1953		 * case we haven't switched to intermediate freq at all.
1954 */
1955 if (unlikely(retval && intermediate_freq)) {
1956 freqs.old = intermediate_freq;
1957 freqs.new = policy->restore_freq;
1958 cpufreq_freq_transition_begin(policy, &freqs);
1959 cpufreq_freq_transition_end(policy, &freqs, 0);
1960 }
1961 }
1962
Viresh Kumar8d657752014-05-21 14:29:29 +05301963 return retval;
1964}
1965
Linus Torvalds1da177e2005-04-16 15:20:36 -07001966int __cpufreq_driver_target(struct cpufreq_policy *policy,
1967 unsigned int target_freq,
1968 unsigned int relation)
1969{
Viresh Kumar72499242012-10-31 01:28:21 +01001970 unsigned int old_target_freq = target_freq;
Viresh Kumar8d657752014-05-21 14:29:29 +05301971 int retval = -EINVAL;
Ashok Rajc32b6b82005-10-30 14:59:54 -08001972
Konrad Rzeszutek Wilka7b422c2012-03-13 19:18:39 -04001973 if (cpufreq_disabled())
1974 return -ENODEV;
1975
Viresh Kumar72499242012-10-31 01:28:21 +01001976 /* Make sure that target_freq is within supported range */
1977 if (target_freq > policy->max)
1978 target_freq = policy->max;
1979 if (target_freq < policy->min)
1980 target_freq = policy->min;
1981
1982 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
Joe Perchese837f9b2014-03-11 10:03:00 -07001983 policy->cpu, target_freq, relation, old_target_freq);
Viresh Kumar5a1c0222012-10-31 01:28:15 +01001984
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +05301985 /*
1986 * This might look like a redundant call as we are checking it again
1987	 * after finding the index. But it is left intentionally for the case
1988	 * where exactly the same freq is requested again, so that we can save a
1989	 * few function calls.
1990 */
Viresh Kumar5a1c0222012-10-31 01:28:15 +01001991 if (target_freq == policy->cur)
1992 return 0;
1993
Viresh Kumar1c03a2d2014-06-02 22:49:28 +05301994 /* Save last value to restore later on errors */
1995 policy->restore_freq = policy->cur;
1996
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001997 if (cpufreq_driver->target)
1998 retval = cpufreq_driver->target(policy, target_freq, relation);
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +05301999 else if (cpufreq_driver->target_index) {
2000 struct cpufreq_frequency_table *freq_table;
2001 int index;
Ashok Raj90d45d12005-11-08 21:34:24 -08002002
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +05302003 freq_table = cpufreq_frequency_get_table(policy->cpu);
2004 if (unlikely(!freq_table)) {
2005 pr_err("%s: Unable to find freq_table\n", __func__);
2006 goto out;
2007 }
2008
2009 retval = cpufreq_frequency_table_target(policy, freq_table,
2010 target_freq, relation, &index);
2011 if (unlikely(retval)) {
2012 pr_err("%s: Unable to find matching freq\n", __func__);
2013 goto out;
2014 }
2015
Viresh Kumard4019f02013-08-14 19:38:24 +05302016 if (freq_table[index].frequency == policy->cur) {
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +05302017 retval = 0;
Viresh Kumard4019f02013-08-14 19:38:24 +05302018 goto out;
2019 }
2020
Viresh Kumar8d657752014-05-21 14:29:29 +05302021 retval = __target_index(policy, freq_table, index);
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +05302022 }
2023
2024out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002025 return retval;
2026}
2027EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
2028
Linus Torvalds1da177e2005-04-16 15:20:36 -07002029int cpufreq_driver_target(struct cpufreq_policy *policy,
2030 unsigned int target_freq,
2031 unsigned int relation)
2032{
Julia Lawallf1829e42008-07-25 22:44:53 +02002033 int ret = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002034
viresh kumarad7722d2013-10-18 19:10:15 +05302035 down_write(&policy->rwsem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002036
2037 ret = __cpufreq_driver_target(policy, target_freq, relation);
2038
viresh kumarad7722d2013-10-18 19:10:15 +05302039 up_write(&policy->rwsem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002040
Linus Torvalds1da177e2005-04-16 15:20:36 -07002041 return ret;
2042}
2043EXPORT_SYMBOL_GPL(cpufreq_driver_target);
2044
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05302045static int __cpufreq_governor(struct cpufreq_policy *policy,
2046 unsigned int event)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002047{
Dave Jonescc993ca2005-07-28 09:43:56 -07002048 int ret;
Thomas Renninger6afde102007-10-02 13:28:13 -07002049
2050	/* 'gov' must only be defined when the default governor is known to have
2051	   latency restrictions, e.g. conservative or ondemand.
2052	   That this is the case is already ensured in Kconfig.
2053 */
2054#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
2055 struct cpufreq_governor *gov = &cpufreq_gov_performance;
2056#else
2057 struct cpufreq_governor *gov = NULL;
2058#endif
Thomas Renninger1c256242007-10-02 13:28:12 -07002059
Viresh Kumar2f0aea92014-03-04 11:00:26 +08002060 /* Don't start any governor operations if we are entering suspend */
2061 if (cpufreq_suspended)
2062 return 0;
Ethan Zhaocb57720b2014-12-18 15:28:19 +09002063 /*
2064	 * The governor might not have been initialized if an ACPI _PPC change
2065	 * notification happened, so check for it.
2066 */
2067 if (!policy->governor)
2068 return -EINVAL;
Viresh Kumar2f0aea92014-03-04 11:00:26 +08002069
Thomas Renninger1c256242007-10-02 13:28:12 -07002070 if (policy->governor->max_transition_latency &&
2071 policy->cpuinfo.transition_latency >
2072 policy->governor->max_transition_latency) {
Thomas Renninger6afde102007-10-02 13:28:13 -07002073 if (!gov)
2074 return -EINVAL;
2075 else {
Joe Perchese837f9b2014-03-11 10:03:00 -07002076 pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
2077 policy->governor->name, gov->name);
Thomas Renninger6afde102007-10-02 13:28:13 -07002078 policy->governor = gov;
2079 }
Thomas Renninger1c256242007-10-02 13:28:12 -07002080 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002081
Viresh Kumarfe492f32013-08-06 22:53:10 +05302082 if (event == CPUFREQ_GOV_POLICY_INIT)
2083 if (!try_module_get(policy->governor->owner))
2084 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002085
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02002086 pr_debug("__cpufreq_governor for CPU %u, event %u\n",
Joe Perchese837f9b2014-03-11 10:03:00 -07002087 policy->cpu, event);
Xiaoguang Chen95731eb2013-06-19 15:00:07 +08002088
2089 mutex_lock(&cpufreq_governor_lock);
Srivatsa S. Bhat56d07db2013-09-07 01:23:55 +05302090 if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
Viresh Kumarf73d3932013-08-31 17:53:40 +05302091 || (!policy->governor_enabled
2092 && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
Xiaoguang Chen95731eb2013-06-19 15:00:07 +08002093 mutex_unlock(&cpufreq_governor_lock);
2094 return -EBUSY;
2095 }
2096
2097 if (event == CPUFREQ_GOV_STOP)
2098 policy->governor_enabled = false;
2099 else if (event == CPUFREQ_GOV_START)
2100 policy->governor_enabled = true;
2101
2102 mutex_unlock(&cpufreq_governor_lock);
2103
Linus Torvalds1da177e2005-04-16 15:20:36 -07002104 ret = policy->governor->governor(policy, event);
2105
Viresh Kumar4d5dcc42013-03-27 15:58:58 +00002106 if (!ret) {
2107 if (event == CPUFREQ_GOV_POLICY_INIT)
2108 policy->governor->initialized++;
2109 else if (event == CPUFREQ_GOV_POLICY_EXIT)
2110 policy->governor->initialized--;
Xiaoguang Chen95731eb2013-06-19 15:00:07 +08002111 } else {
2112 /* Restore original values */
2113 mutex_lock(&cpufreq_governor_lock);
2114 if (event == CPUFREQ_GOV_STOP)
2115 policy->governor_enabled = true;
2116 else if (event == CPUFREQ_GOV_START)
2117 policy->governor_enabled = false;
2118 mutex_unlock(&cpufreq_governor_lock);
Viresh Kumar4d5dcc42013-03-27 15:58:58 +00002119 }
Viresh Kumarb3940582013-02-01 05:42:58 +00002120
Viresh Kumarfe492f32013-08-06 22:53:10 +05302121 if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
2122 ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002123 module_put(policy->governor->owner);
2124
2125 return ret;
2126}
2127
Linus Torvalds1da177e2005-04-16 15:20:36 -07002128int cpufreq_register_governor(struct cpufreq_governor *governor)
2129{
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -07002130 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002131
2132 if (!governor)
2133 return -EINVAL;
2134
Konrad Rzeszutek Wilka7b422c2012-03-13 19:18:39 -04002135 if (cpufreq_disabled())
2136 return -ENODEV;
2137
akpm@osdl.org3fc54d32006-01-13 15:54:22 -08002138 mutex_lock(&cpufreq_governor_mutex);
Dave Jones32ee8c32006-02-28 00:43:23 -05002139
Viresh Kumarb3940582013-02-01 05:42:58 +00002140 governor->initialized = 0;
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -07002141 err = -EBUSY;
Viresh Kumar42f91fa2015-01-02 12:34:26 +05302142 if (!find_governor(governor->name)) {
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -07002143 err = 0;
2144 list_add(&governor->governor_list, &cpufreq_governor_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002145 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002146
Dave Jones32ee8c32006-02-28 00:43:23 -05002147 mutex_unlock(&cpufreq_governor_mutex);
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -07002148 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002149}
2150EXPORT_SYMBOL_GPL(cpufreq_register_governor);
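
/*
 * Illustrative sketch (not part of the original file): the shape of a minimal
 * governor registration, modelled loosely on the performance governor. The
 * names my_governor and cpufreq_gov_mine are hypothetical.
 *
 *	static int my_governor(struct cpufreq_policy *policy, unsigned int event)
 *	{
 *		switch (event) {
 *		case CPUFREQ_GOV_START:
 *		case CPUFREQ_GOV_LIMITS:
 *			__cpufreq_driver_target(policy, policy->max,
 *						CPUFREQ_RELATION_H);
 *			break;
 *		}
 *		return 0;
 *	}
 *
 *	static struct cpufreq_governor cpufreq_gov_mine = {
 *		.name		= "mine",
 *		.governor	= my_governor,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	cpufreq_register_governor(&cpufreq_gov_mine);
 */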
2151
Linus Torvalds1da177e2005-04-16 15:20:36 -07002152void cpufreq_unregister_governor(struct cpufreq_governor *governor)
2153{
Prarit Bhargava90e41ba2009-11-12 09:18:46 -05002154 int cpu;
Prarit Bhargava90e41ba2009-11-12 09:18:46 -05002155
Linus Torvalds1da177e2005-04-16 15:20:36 -07002156 if (!governor)
2157 return;
2158
Konrad Rzeszutek Wilka7b422c2012-03-13 19:18:39 -04002159 if (cpufreq_disabled())
2160 return;
2161
Prarit Bhargava90e41ba2009-11-12 09:18:46 -05002162 for_each_present_cpu(cpu) {
2163 if (cpu_online(cpu))
2164 continue;
2165 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
2166 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
2167 }
Prarit Bhargava90e41ba2009-11-12 09:18:46 -05002168
akpm@osdl.org3fc54d32006-01-13 15:54:22 -08002169 mutex_lock(&cpufreq_governor_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002170 list_del(&governor->governor_list);
akpm@osdl.org3fc54d32006-01-13 15:54:22 -08002171 mutex_unlock(&cpufreq_governor_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002172 return;
2173}
2174EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
2175
2176
Linus Torvalds1da177e2005-04-16 15:20:36 -07002177/*********************************************************************
2178 * POLICY INTERFACE *
2179 *********************************************************************/
2180
2181/**
2182 * cpufreq_get_policy - get the current cpufreq_policy
Dave Jones29464f22009-01-18 01:37:11 -05002183 * @policy: struct cpufreq_policy into which the current cpufreq_policy
2184 * is written
Linus Torvalds1da177e2005-04-16 15:20:36 -07002185 *
2186 * Reads the current cpufreq policy.
2187 */
2188int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2189{
2190 struct cpufreq_policy *cpu_policy;
2191 if (!policy)
2192 return -EINVAL;
2193
2194 cpu_policy = cpufreq_cpu_get(cpu);
2195 if (!cpu_policy)
2196 return -EINVAL;
2197
Viresh Kumard5b73cd2013-08-06 22:53:06 +05302198 memcpy(policy, cpu_policy, sizeof(*policy));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002199
2200 cpufreq_cpu_put(cpu_policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002201 return 0;
2202}
2203EXPORT_SYMBOL(cpufreq_get_policy);
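
/*
 * Illustrative sketch (not part of the original file): taking a snapshot of
 * CPU0's policy limits with the helper above. The caller code is hypothetical.
 *
 *	struct cpufreq_policy pol;
 *
 *	if (!cpufreq_get_policy(&pol, 0))
 *		pr_info("cpu0: %u - %u kHz\n", pol.min, pol.max);
 */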
2204
Arjan van de Ven153d7f32006-07-26 15:40:07 +02002205/*
Viresh Kumar037ce832013-10-02 14:13:16 +05302206 * policy : current policy.
2207 * new_policy: policy to be set.
Arjan van de Ven153d7f32006-07-26 15:40:07 +02002208 */
Viresh Kumar037ce832013-10-02 14:13:16 +05302209static int cpufreq_set_policy(struct cpufreq_policy *policy,
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302210 struct cpufreq_policy *new_policy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002211{
Rafael J. Wysockid9a789c2014-02-17 22:56:35 +01002212 struct cpufreq_governor *old_gov;
2213 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002214
Joe Perchese837f9b2014-03-11 10:03:00 -07002215 pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2216 new_policy->cpu, new_policy->min, new_policy->max);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002217
Viresh Kumard5b73cd2013-08-06 22:53:06 +05302218 memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002219
Rafael J. Wysockid9a789c2014-02-17 22:56:35 +01002220 if (new_policy->min > policy->max || new_policy->max < policy->min)
2221 return -EINVAL;
Mattia Dongili9c9a43e2006-07-05 23:12:20 +02002222
Linus Torvalds1da177e2005-04-16 15:20:36 -07002223 /* verify the cpu speed can be set within this limit */
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302224 ret = cpufreq_driver->verify(new_policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002225 if (ret)
Rafael J. Wysockid9a789c2014-02-17 22:56:35 +01002226 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002227
Linus Torvalds1da177e2005-04-16 15:20:36 -07002228 /* adjust if necessary - all reasons */
Alan Sterne041c682006-03-27 01:16:30 -08002229 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302230 CPUFREQ_ADJUST, new_policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002231
2232 /* adjust if necessary - hardware incompatibility*/
Alan Sterne041c682006-03-27 01:16:30 -08002233 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302234 CPUFREQ_INCOMPATIBLE, new_policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002235
Viresh Kumarbb176f72013-06-19 14:19:33 +05302236 /*
2237 * verify the cpu speed can be set within this limit, which might be
2238	 * different from the first one
2239 */
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302240 ret = cpufreq_driver->verify(new_policy);
Alan Sterne041c682006-03-27 01:16:30 -08002241 if (ret)
Rafael J. Wysockid9a789c2014-02-17 22:56:35 +01002242 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002243
2244 /* notification of the new policy */
Alan Sterne041c682006-03-27 01:16:30 -08002245 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302246 CPUFREQ_NOTIFY, new_policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002247
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302248 policy->min = new_policy->min;
2249 policy->max = new_policy->max;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002250
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02002251 pr_debug("new min and max freqs are %u - %u kHz\n",
Joe Perchese837f9b2014-03-11 10:03:00 -07002252 policy->min, policy->max);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002253
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02002254 if (cpufreq_driver->setpolicy) {
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302255 policy->policy = new_policy->policy;
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02002256 pr_debug("setting range\n");
Rafael J. Wysockid9a789c2014-02-17 22:56:35 +01002257 return cpufreq_driver->setpolicy(new_policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002258 }
2259
Rafael J. Wysockid9a789c2014-02-17 22:56:35 +01002260 if (new_policy->governor == policy->governor)
2261 goto out;
2262
2263 pr_debug("governor switch\n");
2264
2265 /* save old, working values */
2266 old_gov = policy->governor;
2267 /* end old governor */
2268 if (old_gov) {
2269 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
2270 up_write(&policy->rwsem);
Stratos Karafotise5c87b72014-03-19 23:29:17 +02002271 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
Rafael J. Wysockid9a789c2014-02-17 22:56:35 +01002272 down_write(&policy->rwsem);
2273 }
2274
2275 /* start new governor */
2276 policy->governor = new_policy->governor;
2277 if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
2278 if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
2279 goto out;
2280
2281 up_write(&policy->rwsem);
2282 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2283 down_write(&policy->rwsem);
2284 }
2285
2286 /* new governor failed, so re-start old one */
2287 pr_debug("starting governor %s failed\n", policy->governor->name);
2288 if (old_gov) {
2289 policy->governor = old_gov;
2290 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
2291 __cpufreq_governor(policy, CPUFREQ_GOV_START);
2292 }
2293
2294 return -EINVAL;
2295
2296 out:
2297 pr_debug("governor: change or update limits\n");
2298 return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002299}
2300
2301/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002302 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
2303 * @cpu: CPU which shall be re-evaluated
2304 *
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002305 * Useful for policy notifiers which have different requirements
Linus Torvalds1da177e2005-04-16 15:20:36 -07002306 * at different times.
2307 */
2308int cpufreq_update_policy(unsigned int cpu)
2309{
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302310 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
2311 struct cpufreq_policy new_policy;
Julia Lawallf1829e42008-07-25 22:44:53 +02002312 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002313
Aaron Plattnerfefa8ff2014-06-18 11:27:32 -07002314 if (!policy)
2315 return -ENODEV;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002316
viresh kumarad7722d2013-10-18 19:10:15 +05302317 down_write(&policy->rwsem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002318
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02002319 pr_debug("updating policy for CPU %u\n", cpu);
Viresh Kumard5b73cd2013-08-06 22:53:06 +05302320 memcpy(&new_policy, policy, sizeof(*policy));
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302321 new_policy.min = policy->user_policy.min;
2322 new_policy.max = policy->user_policy.max;
2323 new_policy.policy = policy->user_policy.policy;
2324 new_policy.governor = policy->user_policy.governor;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002325
Viresh Kumarbb176f72013-06-19 14:19:33 +05302326 /*
2327 * BIOS might change freq behind our back
2328 * -> ask driver for current freq and notify governors about a change
2329 */
Rafael J. Wysocki2ed99e32014-03-12 21:49:33 +01002330 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302331 new_policy.cur = cpufreq_driver->get(cpu);
Viresh Kumarbd0fa9b2014-02-25 14:29:44 +05302332 if (WARN_ON(!new_policy.cur)) {
2333 ret = -EIO;
Aaron Plattnerfefa8ff2014-06-18 11:27:32 -07002334 goto unlock;
Viresh Kumarbd0fa9b2014-02-25 14:29:44 +05302335 }
2336
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302337 if (!policy->cur) {
Joe Perchese837f9b2014-03-11 10:03:00 -07002338 pr_debug("Driver did not initialize current freq\n");
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302339 policy->cur = new_policy.cur;
Thomas Renningera85f7bd2006-02-01 11:36:04 +01002340 } else {
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +05302341 if (policy->cur != new_policy.cur && has_target())
Viresh Kumara1e1dc42015-01-02 12:34:28 +05302342 cpufreq_out_of_sync(policy, new_policy.cur);
Thomas Renningera85f7bd2006-02-01 11:36:04 +01002343 }
Thomas Renninger0961dd02006-01-26 18:46:33 +01002344 }
2345
Viresh Kumar037ce832013-10-02 14:13:16 +05302346 ret = cpufreq_set_policy(policy, &new_policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002347
Aaron Plattnerfefa8ff2014-06-18 11:27:32 -07002348unlock:
viresh kumarad7722d2013-10-18 19:10:15 +05302349 up_write(&policy->rwsem);
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08002350
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302351 cpufreq_cpu_put(policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002352 return ret;
2353}
2354EXPORT_SYMBOL(cpufreq_update_policy);
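
/*
 * Illustrative sketch (not part of the original file): platform code that
 * learns of a new firmware-imposed limit (for example via an ACPI _PPC
 * notification) can ask cpufreq to re-evaluate a CPU's policy:
 *
 *	cpufreq_update_policy(cpu);
 */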
2355
static int cpufreq_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct device *dev;

	dev = get_cpu_device(cpu);
	if (dev) {
		switch (action & ~CPU_TASKS_FROZEN) {
		case CPU_ONLINE:
			cpufreq_add_dev(dev, NULL);
			break;

		case CPU_DOWN_PREPARE:
			__cpufreq_remove_dev_prepare(dev, NULL);
			break;

		case CPU_POST_DEAD:
			__cpufreq_remove_dev_finish(dev, NULL);
			break;

		case CPU_DOWN_FAILED:
			cpufreq_add_dev(dev, NULL);
			break;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call = cpufreq_cpu_callback,
};

/*********************************************************************
 *                              BOOST                                *
 *********************************************************************/
static int cpufreq_boost_set_sw(int state)
{
	struct cpufreq_frequency_table *freq_table;
	struct cpufreq_policy *policy;
	int ret = -EINVAL;

	for_each_active_policy(policy) {
		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (freq_table) {
			ret = cpufreq_frequency_table_cpuinfo(policy,
							      freq_table);
			if (ret) {
				pr_err("%s: Policy frequency update failed\n",
				       __func__);
				break;
			}
			policy->user_policy.max = policy->max;
			__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
		}
	}

	return ret;
}

int cpufreq_boost_trigger_state(int state)
{
	unsigned long flags;
	int ret = 0;

	if (cpufreq_driver->boost_enabled == state)
		return 0;

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver->boost_enabled = state;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = cpufreq_driver->set_boost(state);
	if (ret) {
		write_lock_irqsave(&cpufreq_driver_lock, flags);
		cpufreq_driver->boost_enabled = !state;
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);

		pr_err("%s: Cannot %s BOOST\n",
		       __func__, state ? "enable" : "disable");
	}

	return ret;
}

int cpufreq_boost_supported(void)
{
	if (likely(cpufreq_driver))
		return cpufreq_driver->boost_supported;

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_supported);

int cpufreq_boost_enabled(void)
{
	return cpufreq_driver->boost_enabled;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);

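/*
 * Illustrative sketch (not part of this file): a driver with hardware
 * boost control sets .boost_supported and supplies its own .set_boost
 * callback; drivers that leave .set_boost NULL fall back to
 * cpufreq_boost_set_sw() above, which simply re-clamps each active
 * policy to the boosted frequency table.  The "example" names and the
 * helper writing the boost register are hypothetical.
 *
 *	static int example_hw_set_boost(int state)
 *	{
 *		return example_write_boost_register(state);
 *	}
 *
 *	static struct cpufreq_driver example_driver = {
 *		...
 *		.boost_supported = true,
 *		.set_boost	 = example_hw_set_boost,
 *	};
 */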
/*********************************************************************
 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
 *********************************************************************/

/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EEXIST when another driver got here first
 * (and isn't unregistered in the meantime).
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    !(driver_data->setpolicy || driver_data->target_index ||
		    driver_data->target) ||
	     (driver_data->setpolicy && (driver_data->target_index ||
		    driver_data->target)) ||
	     (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EEXIST;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	if (cpufreq_boost_supported()) {
		/*
		 * Check if driver provides function to enable boost -
		 * if not, use cpufreq_boost_set_sw as default
		 */
		if (!cpufreq_driver->set_boost)
			cpufreq_driver->set_boost = cpufreq_boost_set_sw;

		ret = cpufreq_sysfs_create_file(&boost.attr);
		if (ret) {
			pr_err("%s: cannot register global BOOST sysfs file\n",
			       __func__);
			goto err_null_driver;
		}
	}

	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_boost_unreg;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
	    list_empty(&cpufreq_policy_list)) {
		/* if all ->init() calls failed, unregister */
		pr_debug("%s: No CPU initialized for driver %s\n", __func__,
			 driver_data->name);
		goto err_if_unreg;
	}

	register_hotcpu_notifier(&cpufreq_cpu_notifier);
	pr_debug("driver %s up and running\n", driver_data->name);

	return 0;
err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
	if (cpufreq_boost_supported())
		cpufreq_sysfs_remove_file(&boost.attr);
err_null_driver:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);

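/*
 * Illustrative sketch (not part of this file): a minimal frequency
 * driver registers itself at module init and unregisters at exit.
 * The "example" callbacks and module hooks are hypothetical; the
 * struct cpufreq_driver fields and the register/unregister calls are
 * the real interface validated above (a driver needs .init, .verify
 * and one of .setpolicy, .target or .target_index).
 *
 *	static struct cpufreq_driver example_cpufreq_driver = {
 *		.name		= "example",
 *		.init		= example_cpu_init,
 *		.verify		= cpufreq_generic_frequency_table_verify,
 *		.target_index	= example_target_index,
 *		.get		= cpufreq_generic_get,
 *	};
 *
 *	static int __init example_cpufreq_init(void)
 *	{
 *		return cpufreq_register_driver(&example_cpufreq_driver);
 *	}
 *	module_init(example_cpufreq_init);
 *
 *	static void __exit example_cpufreq_exit(void)
 *	{
 *		cpufreq_unregister_driver(&example_cpufreq_driver);
 *	}
 *	module_exit(example_cpufreq_exit);
 */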
/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	subsys_interface_unregister(&cpufreq_interface);
	if (cpufreq_boost_supported())
		cpufreq_sysfs_remove_file(&boost.attr);

	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	down_write(&cpufreq_rwsem);
	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpufreq_driver = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	up_write(&cpufreq_rwsem);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);

/*
 * Stop cpufreq at shutdown to make sure it isn't holding any locks
 * or mutexes when secondary CPUs are halted.
 */
static struct syscore_ops cpufreq_syscore_ops = {
	.shutdown = cpufreq_suspend,
};

static int __init cpufreq_core_init(void)
{
	if (cpufreq_disabled())
		return -ENODEV;

	cpufreq_global_kobject = kobject_create();
	BUG_ON(!cpufreq_global_kobject);

	register_syscore_ops(&cpufreq_syscore_ops);

	return 0;
}
core_initcall(cpufreq_core_init);