blob: e6a63d6ba6f1c36c3fef59cc46d3a412746b956d [file] [log] [blame]
/*
 * linux/drivers/cpufreq/cpufreq.c
 *
 * Copyright (C) 2001 Russell King
 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *	Added handling for CPU hotplug
 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *	Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
17
Viresh Kumardb701152012-10-23 01:29:03 +020018#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
Viresh Kumar5ff0a262013-08-06 22:53:03 +053020#include <linux/cpu.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070021#include <linux/cpufreq.h>
22#include <linux/delay.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070023#include <linux/device.h>
Viresh Kumar5ff0a262013-08-06 22:53:03 +053024#include <linux/init.h>
25#include <linux/kernel_stat.h>
26#include <linux/module.h>
akpm@osdl.org3fc54d32006-01-13 15:54:22 -080027#include <linux/mutex.h>
Viresh Kumar5ff0a262013-08-06 22:53:03 +053028#include <linux/slab.h>
Viresh Kumar2f0aea92014-03-04 11:00:26 +080029#include <linux/suspend.h>
Doug Anderson90de2a42014-12-23 22:09:48 -080030#include <linux/syscore_ops.h>
Viresh Kumar5ff0a262013-08-06 22:53:03 +053031#include <linux/tick.h>
Thomas Renninger6f4f2722010-04-20 13:17:36 +020032#include <trace/events/power.h>
33
Viresh Kumarb4f06762015-01-27 14:06:08 +053034static LIST_HEAD(cpufreq_policy_list);
Viresh Kumarf9637352015-05-12 12:20:11 +053035
36static inline bool policy_is_inactive(struct cpufreq_policy *policy)
37{
38 return cpumask_empty(policy->cpus);
39}
40
41static bool suitable_policy(struct cpufreq_policy *policy, bool active)
42{
43 return active == !policy_is_inactive(policy);
44}
45
46/* Finds Next Acive/Inactive policy */
47static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy,
48 bool active)
49{
50 do {
51 policy = list_next_entry(policy, policy_list);
52
53 /* No more policies in the list */
54 if (&policy->policy_list == &cpufreq_policy_list)
55 return NULL;
56 } while (!suitable_policy(policy, active));
57
58 return policy;
59}
60
61static struct cpufreq_policy *first_policy(bool active)
62{
63 struct cpufreq_policy *policy;
64
65 /* No policies in the list */
66 if (list_empty(&cpufreq_policy_list))
67 return NULL;
68
69 policy = list_first_entry(&cpufreq_policy_list, typeof(*policy),
70 policy_list);
71
72 if (!suitable_policy(policy, active))
73 policy = next_policy(policy, active);
74
75 return policy;
76}
77
/* Macros to iterate over CPU policies */
#define for_each_suitable_policy(__policy, __active)		\
	for (__policy = first_policy(__active);			\
	     __policy;						\
	     __policy = next_policy(__policy, __active))

#define for_each_active_policy(__policy)			\
	for_each_suitable_policy(__policy, true)
#define for_each_inactive_policy(__policy)			\
	for_each_suitable_policy(__policy, false)

/* Iterate over every policy regardless of active state. */
#define for_each_policy(__policy)				\
	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)

/* Iterate over registered governors */
static LIST_HEAD(cpufreq_governor_list);
#define for_each_governor(__governor)				\
	list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
96
Linus Torvalds1da177e2005-04-16 15:20:36 -070097/**
Dave Jonescd878472006-08-11 17:59:28 -040098 * The "cpufreq driver" - the arch- or hardware-dependent low
Linus Torvalds1da177e2005-04-16 15:20:36 -070099 * level driver of CPUFreq support, and its spinlock. This lock
100 * also protects the cpufreq_cpu_data array.
101 */
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +0200102static struct cpufreq_driver *cpufreq_driver;
Mike Travis7a6aedf2008-03-25 15:06:53 -0700103static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
Viresh Kumarbb176f72013-06-19 14:19:33 +0530104static DEFINE_RWLOCK(cpufreq_driver_lock);
Jane Li6f1e4ef2014-01-03 17:17:41 +0800105DEFINE_MUTEX(cpufreq_governor_lock);
Viresh Kumarbb176f72013-06-19 14:19:33 +0530106
Thomas Renninger084f3492007-07-09 11:35:28 -0700107/* This one keeps track of the previously set governor of a removed CPU */
Dmitry Monakhove77b89f2009-10-05 00:38:55 +0400108static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700109
Viresh Kumar2f0aea92014-03-04 11:00:26 +0800110/* Flag to suspend/resume CPUFreq governors */
111static bool cpufreq_suspended;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700112
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +0530113static inline bool has_target(void)
114{
115 return cpufreq_driver->target_index || cpufreq_driver->target;
116}
117
/*
 * rwsem to guarantee that cpufreq driver module doesn't unload during critical
 * sections
 */
static DECLARE_RWSEM(cpufreq_rwsem);

/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
			      unsigned int event);
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
static void handle_update(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700129
130/**
Dave Jones32ee8c32006-02-28 00:43:23 -0500131 * Two notifier lists: the "policy" list is involved in the
132 * validation process for a new CPU frequency policy; the
Linus Torvalds1da177e2005-04-16 15:20:36 -0700133 * "transition" list for kernel code that needs to handle
134 * changes to devices when the CPU clock speed changes.
135 * The mutex locks both lists.
136 */
Alan Sterne041c682006-03-27 01:16:30 -0800137static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
Alan Sternb4dfdbb2006-10-04 02:17:06 -0700138static struct srcu_notifier_head cpufreq_transition_notifier_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700139
Cesar Eduardo Barros74212ca2008-02-16 08:41:24 -0200140static bool init_cpufreq_transition_notifier_list_called;
Alan Sternb4dfdbb2006-10-04 02:17:06 -0700141static int __init init_cpufreq_transition_notifier_list(void)
142{
143 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
Cesar Eduardo Barros74212ca2008-02-16 08:41:24 -0200144 init_cpufreq_transition_notifier_list_called = true;
Alan Sternb4dfdbb2006-10-04 02:17:06 -0700145 return 0;
146}
Linus Torvaldsb3438f82006-11-20 11:47:18 -0800147pure_initcall(init_cpufreq_transition_notifier_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700148
Konrad Rzeszutek Wilka7b422c2012-03-13 19:18:39 -0400149static int off __read_mostly;
Viresh Kumarda584452012-10-26 00:51:32 +0200150static int cpufreq_disabled(void)
Konrad Rzeszutek Wilka7b422c2012-03-13 19:18:39 -0400151{
152 return off;
153}
154void disable_cpufreq(void)
155{
156 off = 1;
157}
Dave Jones29464f22009-01-18 01:37:11 -0500158static DEFINE_MUTEX(cpufreq_governor_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700159
Viresh Kumar4d5dcc42013-03-27 15:58:58 +0000160bool have_governor_per_policy(void)
161{
Viresh Kumar0b981e72013-10-02 14:13:18 +0530162 return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
Viresh Kumar4d5dcc42013-03-27 15:58:58 +0000163}
Viresh Kumar3f869d62013-05-16 05:09:56 +0000164EXPORT_SYMBOL_GPL(have_governor_per_policy);
Viresh Kumar4d5dcc42013-03-27 15:58:58 +0000165
Viresh Kumar944e9a02013-05-16 05:09:57 +0000166struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
167{
168 if (have_governor_per_policy())
169 return &policy->kobj;
170 else
171 return cpufreq_global_kobject;
172}
173EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
174
Viresh Kumar72a4ce32013-05-17 11:26:32 +0000175static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
176{
177 u64 idle_time;
178 u64 cur_wall_time;
179 u64 busy_time;
180
181 cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
182
183 busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
184 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
185 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
186 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
187 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
188 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
189
190 idle_time = cur_wall_time - busy_time;
191 if (wall)
192 *wall = cputime_to_usecs(cur_wall_time);
193
194 return cputime_to_usecs(idle_time);
195}
196
197u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
198{
199 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
200
201 if (idle_time == -1ULL)
202 return get_cpu_idle_time_jiffy(cpu, wall);
203 else if (!io_busy)
204 idle_time += get_cpu_iowait_time_us(cpu, wall);
205
206 return idle_time;
207}
208EXPORT_SYMBOL_GPL(get_cpu_idle_time);
209
Viresh Kumar70e9e772013-10-03 20:29:07 +0530210/*
211 * This is a generic cpufreq init() routine which can be used by cpufreq
212 * drivers of SMP systems. It will do following:
213 * - validate & show freq table passed
214 * - set policies transition latency
215 * - policy->cpus with all possible CPUs
216 */
217int cpufreq_generic_init(struct cpufreq_policy *policy,
218 struct cpufreq_frequency_table *table,
219 unsigned int transition_latency)
220{
221 int ret;
222
223 ret = cpufreq_table_validate_and_show(policy, table);
224 if (ret) {
225 pr_err("%s: invalid frequency table: %d\n", __func__, ret);
226 return ret;
227 }
228
229 policy->cpuinfo.transition_latency = transition_latency;
230
231 /*
232 * The driver only supports the SMP configuartion where all processors
233 * share the clock and voltage and clock.
234 */
235 cpumask_setall(policy->cpus);
236
237 return 0;
238}
239EXPORT_SYMBOL_GPL(cpufreq_generic_init);
240
Viresh Kumar988bed02015-05-08 11:53:45 +0530241/* Only for cpufreq core internal use */
242struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
Viresh Kumar652ed952014-01-09 20:38:43 +0530243{
244 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
245
Viresh Kumar988bed02015-05-08 11:53:45 +0530246 return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
247}
248
249unsigned int cpufreq_generic_get(unsigned int cpu)
250{
251 struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
252
Viresh Kumar652ed952014-01-09 20:38:43 +0530253 if (!policy || IS_ERR(policy->clk)) {
Joe Perchese837f9b2014-03-11 10:03:00 -0700254 pr_err("%s: No %s associated to cpu: %d\n",
255 __func__, policy ? "clk" : "policy", cpu);
Viresh Kumar652ed952014-01-09 20:38:43 +0530256 return 0;
257 }
258
259 return clk_get_rate(policy->clk) / 1000;
260}
261EXPORT_SYMBOL_GPL(cpufreq_generic_get);
262
Viresh Kumar50e9c852015-02-19 17:02:03 +0530263/**
264 * cpufreq_cpu_get: returns policy for a cpu and marks it busy.
265 *
266 * @cpu: cpu to find policy for.
267 *
268 * This returns policy for 'cpu', returns NULL if it doesn't exist.
269 * It also increments the kobject reference count to mark it busy and so would
270 * require a corresponding call to cpufreq_cpu_put() to decrement it back.
271 * If corresponding call cpufreq_cpu_put() isn't made, the policy wouldn't be
272 * freed as that depends on the kobj count.
273 *
274 * It also takes a read-lock of 'cpufreq_rwsem' and doesn't put it back if a
275 * valid policy is found. This is done to make sure the driver doesn't get
276 * unregistered while the policy is being used.
277 *
278 * Return: A valid policy on success, otherwise NULL on failure.
279 */
Viresh Kumar6eed9402013-08-06 22:53:11 +0530280struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700281{
Viresh Kumar6eed9402013-08-06 22:53:11 +0530282 struct cpufreq_policy *policy = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700283 unsigned long flags;
284
Viresh Kumar1b947c902015-02-19 17:02:05 +0530285 if (WARN_ON(cpu >= nr_cpu_ids))
Viresh Kumar6eed9402013-08-06 22:53:11 +0530286 return NULL;
287
288 if (!down_read_trylock(&cpufreq_rwsem))
289 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700290
291 /* get the cpufreq driver */
Nathan Zimmer0d1857a2013-02-22 16:24:34 +0000292 read_lock_irqsave(&cpufreq_driver_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700293
Viresh Kumar6eed9402013-08-06 22:53:11 +0530294 if (cpufreq_driver) {
295 /* get the CPU */
Viresh Kumar988bed02015-05-08 11:53:45 +0530296 policy = cpufreq_cpu_get_raw(cpu);
Viresh Kumar6eed9402013-08-06 22:53:11 +0530297 if (policy)
298 kobject_get(&policy->kobj);
299 }
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +0200300
Viresh Kumar6eed9402013-08-06 22:53:11 +0530301 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700302
Viresh Kumar3a3e9e02013-08-06 22:53:05 +0530303 if (!policy)
Viresh Kumar6eed9402013-08-06 22:53:11 +0530304 up_read(&cpufreq_rwsem);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700305
Viresh Kumar3a3e9e02013-08-06 22:53:05 +0530306 return policy;
Stephen Boyda9144432012-07-20 18:14:38 +0000307}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700308EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
309
Viresh Kumar50e9c852015-02-19 17:02:03 +0530310/**
311 * cpufreq_cpu_put: Decrements the usage count of a policy
312 *
313 * @policy: policy earlier returned by cpufreq_cpu_get().
314 *
315 * This decrements the kobject reference count incremented earlier by calling
316 * cpufreq_cpu_get().
317 *
318 * It also drops the read-lock of 'cpufreq_rwsem' taken at cpufreq_cpu_get().
319 */
Viresh Kumar3a3e9e02013-08-06 22:53:05 +0530320void cpufreq_cpu_put(struct cpufreq_policy *policy)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700321{
Viresh Kumar6eed9402013-08-06 22:53:11 +0530322 kobject_put(&policy->kobj);
323 up_read(&cpufreq_rwsem);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700324}
325EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
326
Linus Torvalds1da177e2005-04-16 15:20:36 -0700327/*********************************************************************
Linus Torvalds1da177e2005-04-16 15:20:36 -0700328 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
329 *********************************************************************/
330
331/**
332 * adjust_jiffies - adjust the system "loops_per_jiffy"
333 *
334 * This function alters the system "loops_per_jiffy" for the clock
335 * speed change. Note that loops_per_jiffy cannot be updated on SMP
Dave Jones32ee8c32006-02-28 00:43:23 -0500336 * systems as each CPU might be scaled differently. So, use the arch
Linus Torvalds1da177e2005-04-16 15:20:36 -0700337 * per-CPU loops_per_jiffy value wherever possible.
338 */
Arjan van de Ven858119e2006-01-14 13:20:43 -0800339static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700340{
Viresh Kumar39c132e2015-01-02 12:34:34 +0530341#ifndef CONFIG_SMP
342 static unsigned long l_p_j_ref;
343 static unsigned int l_p_j_ref_freq;
344
Linus Torvalds1da177e2005-04-16 15:20:36 -0700345 if (ci->flags & CPUFREQ_CONST_LOOPS)
346 return;
347
348 if (!l_p_j_ref_freq) {
349 l_p_j_ref = loops_per_jiffy;
350 l_p_j_ref_freq = ci->old;
Joe Perchese837f9b2014-03-11 10:03:00 -0700351 pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
352 l_p_j_ref, l_p_j_ref_freq);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700353 }
Viresh Kumar0b443ea2014-03-19 11:24:58 +0530354 if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
Gautham R Shenoye08f5f52006-10-26 16:20:58 +0530355 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
356 ci->new);
Joe Perchese837f9b2014-03-11 10:03:00 -0700357 pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
358 loops_per_jiffy, ci->new);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700359 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700360#endif
Viresh Kumar39c132e2015-01-02 12:34:34 +0530361}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700362
Viresh Kumar0956df9c2013-06-19 14:19:34 +0530363static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
Viresh Kumarb43a7ff2013-03-24 11:56:43 +0530364 struct cpufreq_freqs *freqs, unsigned int state)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700365{
366 BUG_ON(irqs_disabled());
367
Dirk Brandewied5aaffa2013-01-17 16:22:21 +0000368 if (cpufreq_disabled())
369 return;
370
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +0200371 freqs->flags = cpufreq_driver->flags;
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +0200372 pr_debug("notification %u of frequency transition to %u kHz\n",
Joe Perchese837f9b2014-03-11 10:03:00 -0700373 state, freqs->new);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700374
Linus Torvalds1da177e2005-04-16 15:20:36 -0700375 switch (state) {
Dave Jonese4472cb2006-01-31 15:53:55 -0800376
Linus Torvalds1da177e2005-04-16 15:20:36 -0700377 case CPUFREQ_PRECHANGE:
Dave Jones32ee8c32006-02-28 00:43:23 -0500378 /* detect if the driver reported a value as "old frequency"
Dave Jonese4472cb2006-01-31 15:53:55 -0800379 * which is not equal to what the cpufreq core thinks is
380 * "old frequency".
Linus Torvalds1da177e2005-04-16 15:20:36 -0700381 */
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +0200382 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
Dave Jonese4472cb2006-01-31 15:53:55 -0800383 if ((policy) && (policy->cpu == freqs->cpu) &&
384 (policy->cur) && (policy->cur != freqs->old)) {
Joe Perchese837f9b2014-03-11 10:03:00 -0700385 pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
386 freqs->old, policy->cur);
Dave Jonese4472cb2006-01-31 15:53:55 -0800387 freqs->old = policy->cur;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700388 }
389 }
Alan Sternb4dfdbb2006-10-04 02:17:06 -0700390 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
Alan Sterne041c682006-03-27 01:16:30 -0800391 CPUFREQ_PRECHANGE, freqs);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700392 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
393 break;
Dave Jonese4472cb2006-01-31 15:53:55 -0800394
Linus Torvalds1da177e2005-04-16 15:20:36 -0700395 case CPUFREQ_POSTCHANGE:
396 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
Joe Perchese837f9b2014-03-11 10:03:00 -0700397 pr_debug("FREQ: %lu - CPU: %lu\n",
398 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
Thomas Renninger25e41932011-01-03 17:50:44 +0100399 trace_cpu_frequency(freqs->new, freqs->cpu);
Alan Sternb4dfdbb2006-10-04 02:17:06 -0700400 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
Alan Sterne041c682006-03-27 01:16:30 -0800401 CPUFREQ_POSTCHANGE, freqs);
Dave Jonese4472cb2006-01-31 15:53:55 -0800402 if (likely(policy) && likely(policy->cpu == freqs->cpu))
403 policy->cur = freqs->new;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700404 break;
405 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700406}
Viresh Kumarbb176f72013-06-19 14:19:33 +0530407
Viresh Kumarb43a7ff2013-03-24 11:56:43 +0530408/**
409 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
410 * on frequency transition.
411 *
412 * This function calls the transition notifiers and the "adjust_jiffies"
413 * function. It is called twice on all CPU frequency changes that have
414 * external effects.
415 */
Viresh Kumar236a9802014-03-24 13:35:46 +0530416static void cpufreq_notify_transition(struct cpufreq_policy *policy,
Viresh Kumarb43a7ff2013-03-24 11:56:43 +0530417 struct cpufreq_freqs *freqs, unsigned int state)
418{
419 for_each_cpu(freqs->cpu, policy->cpus)
420 __cpufreq_notify_transition(policy, freqs, state);
421}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700422
Viresh Kumarf7ba3b42013-12-02 11:04:12 +0530423/* Do post notifications when there are chances that transition has failed */
Viresh Kumar236a9802014-03-24 13:35:46 +0530424static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
Viresh Kumarf7ba3b42013-12-02 11:04:12 +0530425 struct cpufreq_freqs *freqs, int transition_failed)
426{
427 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
428 if (!transition_failed)
429 return;
430
431 swap(freqs->old, freqs->new);
432 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
433 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
434}
Viresh Kumarf7ba3b42013-12-02 11:04:12 +0530435
Srivatsa S. Bhat12478cf2014-03-24 13:35:44 +0530436void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
437 struct cpufreq_freqs *freqs)
438{
Srivatsa S. Bhatca654dc2014-05-05 12:52:39 +0530439
440 /*
441 * Catch double invocations of _begin() which lead to self-deadlock.
442 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
443 * doesn't invoke _begin() on their behalf, and hence the chances of
444 * double invocations are very low. Moreover, there are scenarios
445 * where these checks can emit false-positive warnings in these
446 * drivers; so we avoid that by skipping them altogether.
447 */
448 WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
449 && current == policy->transition_task);
450
Srivatsa S. Bhat12478cf2014-03-24 13:35:44 +0530451wait:
452 wait_event(policy->transition_wait, !policy->transition_ongoing);
453
454 spin_lock(&policy->transition_lock);
455
456 if (unlikely(policy->transition_ongoing)) {
457 spin_unlock(&policy->transition_lock);
458 goto wait;
459 }
460
461 policy->transition_ongoing = true;
Srivatsa S. Bhatca654dc2014-05-05 12:52:39 +0530462 policy->transition_task = current;
Srivatsa S. Bhat12478cf2014-03-24 13:35:44 +0530463
464 spin_unlock(&policy->transition_lock);
465
466 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
467}
468EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
469
470void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
471 struct cpufreq_freqs *freqs, int transition_failed)
472{
473 if (unlikely(WARN_ON(!policy->transition_ongoing)))
474 return;
475
476 cpufreq_notify_post_transition(policy, freqs, transition_failed);
477
478 policy->transition_ongoing = false;
Srivatsa S. Bhatca654dc2014-05-05 12:52:39 +0530479 policy->transition_task = NULL;
Srivatsa S. Bhat12478cf2014-03-24 13:35:44 +0530480
481 wake_up(&policy->transition_wait);
482}
483EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
484
Linus Torvalds1da177e2005-04-16 15:20:36 -0700485
Linus Torvalds1da177e2005-04-16 15:20:36 -0700486/*********************************************************************
487 * SYSFS INTERFACE *
488 *********************************************************************/
Rashika Kheria8a5c74a2014-02-26 22:12:42 +0530489static ssize_t show_boost(struct kobject *kobj,
Lukasz Majewski6f19efc2013-12-20 15:24:49 +0100490 struct attribute *attr, char *buf)
491{
492 return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
493}
494
495static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
496 const char *buf, size_t count)
497{
498 int ret, enable;
499
500 ret = sscanf(buf, "%d", &enable);
501 if (ret != 1 || enable < 0 || enable > 1)
502 return -EINVAL;
503
504 if (cpufreq_boost_trigger_state(enable)) {
Joe Perchese837f9b2014-03-11 10:03:00 -0700505 pr_err("%s: Cannot %s BOOST!\n",
506 __func__, enable ? "enable" : "disable");
Lukasz Majewski6f19efc2013-12-20 15:24:49 +0100507 return -EINVAL;
508 }
509
Joe Perchese837f9b2014-03-11 10:03:00 -0700510 pr_debug("%s: cpufreq BOOST %s\n",
511 __func__, enable ? "enabled" : "disabled");
Lukasz Majewski6f19efc2013-12-20 15:24:49 +0100512
513 return count;
514}
515define_one_global_rw(boost);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700516
Viresh Kumar42f91fa2015-01-02 12:34:26 +0530517static struct cpufreq_governor *find_governor(const char *str_governor)
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -0700518{
519 struct cpufreq_governor *t;
520
Viresh Kumarf7b27062015-01-27 14:06:09 +0530521 for_each_governor(t)
Rasmus Villemoes7c4f4532014-09-29 15:50:11 +0200522 if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -0700523 return t;
524
525 return NULL;
526}
527
Linus Torvalds1da177e2005-04-16 15:20:36 -0700528/**
529 * cpufreq_parse_governor - parse a governor string
530 */
Dave Jones905d77c2008-03-05 14:28:32 -0500531static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700532 struct cpufreq_governor **governor)
533{
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -0700534 int err = -EINVAL;
535
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +0200536 if (!cpufreq_driver)
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -0700537 goto out;
538
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +0200539 if (cpufreq_driver->setpolicy) {
Rasmus Villemoes7c4f4532014-09-29 15:50:11 +0200540 if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700541 *policy = CPUFREQ_POLICY_PERFORMANCE;
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -0700542 err = 0;
Rasmus Villemoes7c4f4532014-09-29 15:50:11 +0200543 } else if (!strncasecmp(str_governor, "powersave",
Gautham R Shenoye08f5f52006-10-26 16:20:58 +0530544 CPUFREQ_NAME_LEN)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700545 *policy = CPUFREQ_POLICY_POWERSAVE;
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -0700546 err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700547 }
Viresh Kumar2e1cc3a2015-01-02 12:34:27 +0530548 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700549 struct cpufreq_governor *t;
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -0700550
akpm@osdl.org3fc54d32006-01-13 15:54:22 -0800551 mutex_lock(&cpufreq_governor_mutex);
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -0700552
Viresh Kumar42f91fa2015-01-02 12:34:26 +0530553 t = find_governor(str_governor);
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -0700554
Jeremy Fitzhardingeea714972006-07-06 12:32:01 -0700555 if (t == NULL) {
Kees Cook1a8e1462011-05-04 08:38:56 -0700556 int ret;
Jeremy Fitzhardingeea714972006-07-06 12:32:01 -0700557
Kees Cook1a8e1462011-05-04 08:38:56 -0700558 mutex_unlock(&cpufreq_governor_mutex);
559 ret = request_module("cpufreq_%s", str_governor);
560 mutex_lock(&cpufreq_governor_mutex);
Jeremy Fitzhardingeea714972006-07-06 12:32:01 -0700561
Kees Cook1a8e1462011-05-04 08:38:56 -0700562 if (ret == 0)
Viresh Kumar42f91fa2015-01-02 12:34:26 +0530563 t = find_governor(str_governor);
Jeremy Fitzhardingeea714972006-07-06 12:32:01 -0700564 }
565
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -0700566 if (t != NULL) {
567 *governor = t;
568 err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700569 }
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -0700570
akpm@osdl.org3fc54d32006-01-13 15:54:22 -0800571 mutex_unlock(&cpufreq_governor_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700572 }
Dave Jones29464f22009-01-18 01:37:11 -0500573out:
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -0700574 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700575}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700576
Linus Torvalds1da177e2005-04-16 15:20:36 -0700577/**
Gautham R Shenoye08f5f52006-10-26 16:20:58 +0530578 * cpufreq_per_cpu_attr_read() / show_##file_name() -
579 * print out cpufreq information
Linus Torvalds1da177e2005-04-16 15:20:36 -0700580 *
581 * Write out information from cpufreq_driver->policy[cpu]; object must be
582 * "unsigned int".
583 */
584
Dave Jones32ee8c32006-02-28 00:43:23 -0500585#define show_one(file_name, object) \
586static ssize_t show_##file_name \
Dave Jones905d77c2008-03-05 14:28:32 -0500587(struct cpufreq_policy *policy, char *buf) \
Dave Jones32ee8c32006-02-28 00:43:23 -0500588{ \
Dave Jones29464f22009-01-18 01:37:11 -0500589 return sprintf(buf, "%u\n", policy->object); \
Linus Torvalds1da177e2005-04-16 15:20:36 -0700590}
591
592show_one(cpuinfo_min_freq, cpuinfo.min_freq);
593show_one(cpuinfo_max_freq, cpuinfo.max_freq);
Thomas Renningered129782009-02-04 01:17:41 +0100594show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700595show_one(scaling_min_freq, min);
596show_one(scaling_max_freq, max);
Dirk Brandewiec034b022014-10-13 08:37:40 -0700597
Viresh Kumar09347b22015-01-02 12:34:24 +0530598static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
Dirk Brandewiec034b022014-10-13 08:37:40 -0700599{
600 ssize_t ret;
601
602 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
603 ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
604 else
605 ret = sprintf(buf, "%u\n", policy->cur);
606 return ret;
607}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700608
Viresh Kumar037ce832013-10-02 14:13:16 +0530609static int cpufreq_set_policy(struct cpufreq_policy *policy,
Viresh Kumar3a3e9e02013-08-06 22:53:05 +0530610 struct cpufreq_policy *new_policy);
Thomas Renninger7970e082006-04-13 15:14:04 +0200611
Linus Torvalds1da177e2005-04-16 15:20:36 -0700612/**
613 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
614 */
615#define store_one(file_name, object) \
616static ssize_t store_##file_name \
Dave Jones905d77c2008-03-05 14:28:32 -0500617(struct cpufreq_policy *policy, const char *buf, size_t count) \
Linus Torvalds1da177e2005-04-16 15:20:36 -0700618{ \
Vince Hsu619c144c2014-11-10 14:14:50 +0800619 int ret, temp; \
Linus Torvalds1da177e2005-04-16 15:20:36 -0700620 struct cpufreq_policy new_policy; \
621 \
622 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
623 if (ret) \
624 return -EINVAL; \
625 \
Dave Jones29464f22009-01-18 01:37:11 -0500626 ret = sscanf(buf, "%u", &new_policy.object); \
Linus Torvalds1da177e2005-04-16 15:20:36 -0700627 if (ret != 1) \
628 return -EINVAL; \
629 \
Vince Hsu619c144c2014-11-10 14:14:50 +0800630 temp = new_policy.object; \
Viresh Kumar037ce832013-10-02 14:13:16 +0530631 ret = cpufreq_set_policy(policy, &new_policy); \
Vince Hsu619c144c2014-11-10 14:14:50 +0800632 if (!ret) \
633 policy->user_policy.object = temp; \
Linus Torvalds1da177e2005-04-16 15:20:36 -0700634 \
635 return ret ? ret : count; \
636}
637
Dave Jones29464f22009-01-18 01:37:11 -0500638store_one(scaling_min_freq, min);
639store_one(scaling_max_freq, max);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700640
641/**
642 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
643 */
Dave Jones905d77c2008-03-05 14:28:32 -0500644static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
645 char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700646{
Viresh Kumard92d50a2015-01-02 12:34:29 +0530647 unsigned int cur_freq = __cpufreq_get(policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700648 if (!cur_freq)
649 return sprintf(buf, "<unknown>");
650 return sprintf(buf, "%u\n", cur_freq);
651}
652
Linus Torvalds1da177e2005-04-16 15:20:36 -0700653/**
654 * show_scaling_governor - show the current policy for the specified CPU
655 */
Dave Jones905d77c2008-03-05 14:28:32 -0500656static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700657{
Dave Jones29464f22009-01-18 01:37:11 -0500658 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700659 return sprintf(buf, "powersave\n");
660 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
661 return sprintf(buf, "performance\n");
662 else if (policy->governor)
viresh kumar4b972f02012-10-23 01:23:43 +0200663 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
Dave Jones29464f22009-01-18 01:37:11 -0500664 policy->governor->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700665 return -EINVAL;
666}
667
/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	int ret;
	char str_governor[16];
	struct cpufreq_policy new_policy;

	/* Start from a copy of the current policy so other fields survive. */
	ret = cpufreq_get_policy(&new_policy, policy->cpu);
	if (ret)
		return ret;

	/* %15s leaves room for the NUL terminator in str_governor[16] */
	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	/* Resolve the name to a policy constant or a governor object */
	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
						&new_policy.governor))
		return -EINVAL;

	ret = cpufreq_set_policy(policy, &new_policy);

	/*
	 * NOTE(review): user_policy is refreshed even when
	 * cpufreq_set_policy() failed; policy->policy/->governor then still
	 * hold the previous (unchanged) values, so this re-records the old
	 * settings rather than the rejected request - confirm intended.
	 */
	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret)
		return ret;
	else
		return count;
}
700
701/**
702 * show_scaling_driver - show the cpufreq driver currently loaded
703 */
Dave Jones905d77c2008-03-05 14:28:32 -0500704static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700705{
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +0200706 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700707}
708
/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!has_target()) {
		/* setpolicy-style drivers: only the two static policies exist */
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	for_each_governor(t) {
		/* Stop early if another name plus separator might overflow */
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}
Darrick J. Wonge8628dd2008-04-18 13:31:12 -0700733
/**
 * cpufreq_show_cpus - print a space-separated CPU list into a sysfs buffer
 * @mask: cpumask to print
 * @buf: output buffer, PAGE_SIZE bytes
 *
 * Returns the number of bytes written, including the trailing newline.
 */
ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		/* Keep room for one more separator and the final newline */
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700750
/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	/* related_cpus may include offline CPUs, unlike policy->cpus */
	return cpufreq_show_cpus(policy->related_cpus, buf);
}
759
/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	/* policy->cpus holds only the online CPUs this policy manages */
	return cpufreq_show_cpus(policy->cpus, buf);
}
767
Venki Pallipadi9e769882007-10-26 10:18:21 -0700768static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
Dave Jones905d77c2008-03-05 14:28:32 -0500769 const char *buf, size_t count)
Venki Pallipadi9e769882007-10-26 10:18:21 -0700770{
771 unsigned int freq = 0;
772 unsigned int ret;
773
CHIKAMA masaki879000f2008-06-05 22:46:33 -0700774 if (!policy->governor || !policy->governor->store_setspeed)
Venki Pallipadi9e769882007-10-26 10:18:21 -0700775 return -EINVAL;
776
777 ret = sscanf(buf, "%u", &freq);
778 if (ret != 1)
779 return -EINVAL;
780
781 policy->governor->store_setspeed(policy, freq);
782
783 return count;
784}
785
786static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
787{
CHIKAMA masaki879000f2008-06-05 22:46:33 -0700788 if (!policy->governor || !policy->governor->show_setspeed)
Venki Pallipadi9e769882007-10-26 10:18:21 -0700789 return sprintf(buf, "<unsupported>\n");
790
791 return policy->governor->show_setspeed(policy, buf);
792}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700793
Thomas Renningere2f74f32009-11-19 12:31:01 +0100794/**
viresh kumar8bf1ac722012-10-23 01:23:33 +0200795 * show_bios_limit - show the current cpufreq HW/BIOS limitation
Thomas Renningere2f74f32009-11-19 12:31:01 +0100796 */
797static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
798{
799 unsigned int limit;
800 int ret;
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +0200801 if (cpufreq_driver->bios_limit) {
802 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
Thomas Renningere2f74f32009-11-19 12:31:01 +0100803 if (!ret)
804 return sprintf(buf, "%u\n", limit);
805 }
806 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
807}
808
/*
 * sysfs attribute objects; each cpufreq_freq_attr_* macro binds an
 * attribute name to the matching show_*/store_* handler above.
 * cpuinfo_cur_freq is root-readable only (0400) since it pokes hardware.
 */
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700823
/*
 * Attributes present in every policy's sysfs directory; driver-specific
 * and conditional attributes (e.g. bios_limit) are added separately in
 * cpufreq_add_dev_interface().
 */
static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};
838
/* Recover the policy / freq_attr from the embedded kobject/attribute */
#define to_policy(k)		container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a)		container_of(a, struct freq_attr, attr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700841
/*
 * show - sysfs read dispatcher for all per-policy attributes.
 * Takes cpufreq_rwsem (trylock, so reads fail fast during driver
 * (un)registration) and then the policy rwsem before calling the
 * attribute's ->show() handler.
 */
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret;

	if (!down_read_trylock(&cpufreq_rwsem))
		return -EINVAL;

	down_read(&policy->rwsem);

	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;

	up_read(&policy->rwsem);
	up_read(&cpufreq_rwsem);

	return ret;
}
863
/*
 * store - sysfs write dispatcher for all per-policy attributes.
 * Pins CPU hotplug (get_online_cpus) so policy->cpu cannot go away,
 * rejects writes for offline CPUs, then takes cpufreq_rwsem (read) and
 * the policy rwsem (write) around the attribute's ->store() handler.
 */
static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;

	get_online_cpus();

	if (!cpu_online(policy->cpu))
		goto unlock;

	if (!down_read_trylock(&cpufreq_rwsem))
		goto unlock;

	down_write(&policy->rwsem);

	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

	up_write(&policy->rwsem);

	up_read(&cpufreq_rwsem);
unlock:
	put_online_cpus();

	return ret;
}
894
/*
 * kobject release callback: the last reference to the policy kobject is
 * gone; wake whoever is blocked in wait_for_completion(&kobj_unregister).
 */
static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}
901
/* Route generic sysfs reads/writes to the dispatchers above */
static const struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};
906
/* kobject type for per-policy "cpufreq" sysfs directories */
static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= cpufreq_sysfs_release,
};
912
/* /sys/devices/system/cpu/cpufreq kobject, shared by all policies */
struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

/* Usage count for the global kobject; guarded by its callers' contexts */
static int cpufreq_global_kobject_usage;
917
918int cpufreq_get_global_kobject(void)
919{
920 if (!cpufreq_global_kobject_usage++)
921 return kobject_add(cpufreq_global_kobject,
922 &cpu_subsys.dev_root->kobj, "%s", "cpufreq");
923
924 return 0;
925}
926EXPORT_SYMBOL(cpufreq_get_global_kobject);
927
928void cpufreq_put_global_kobject(void)
929{
930 if (!--cpufreq_global_kobject_usage)
931 kobject_del(cpufreq_global_kobject);
932}
933EXPORT_SYMBOL(cpufreq_put_global_kobject);
934
935int cpufreq_sysfs_create_file(const struct attribute *attr)
936{
937 int ret = cpufreq_get_global_kobject();
938
939 if (!ret) {
940 ret = sysfs_create_file(cpufreq_global_kobject, attr);
941 if (ret)
942 cpufreq_put_global_kobject();
943 }
944
945 return ret;
946}
947EXPORT_SYMBOL(cpufreq_sysfs_create_file);
948
/*
 * cpufreq_sysfs_remove_file - remove @attr from the global cpufreq kobject
 * and drop the usage reference taken by cpufreq_sysfs_create_file().
 */
void cpufreq_sysfs_remove_file(const struct attribute *attr)
{
	sysfs_remove_file(cpufreq_global_kobject, attr);
	cpufreq_put_global_kobject();
}
EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
955
/* symlink affected CPUs */
static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
{
	unsigned int j;
	int ret = 0;

	/*
	 * Every CPU sharing this policy (except the owner, which hosts the
	 * real kobject) gets a "cpufreq" symlink pointing at the policy dir.
	 */
	for_each_cpu(j, policy->cpus) {
		struct device *cpu_dev;

		if (j == policy->cpu)
			continue;

		pr_debug("Adding link for CPU: %u\n", j);
		/* NOTE(review): assumes get_cpu_device() is non-NULL for
		 * CPUs in policy->cpus - confirm for all platforms */
		cpu_dev = get_cpu_device(j);
		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
					"cpufreq");
		if (ret)
			break;
	}
	return ret;
}
977
/*
 * cpufreq_add_dev_interface - populate the policy's sysfs directory:
 * driver-specific attributes, the conditional cpuinfo_cur_freq and
 * bios_limit files, scaling_cur_freq, and the per-CPU symlinks.
 * Returns 0 or the first sysfs error; caller handles cleanup.
 */
static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
				     struct device *dev)
{
	struct freq_attr **drv_attr;
	int ret = 0;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while (drv_attr && *drv_attr) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			return ret;
		drv_attr++;
	}
	/* cpuinfo_cur_freq only makes sense if the driver can read HW */
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			return ret;
	}

	ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
	if (ret)
		return ret;

	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			return ret;
	}

	return cpufreq_add_dev_symlink(policy);
}
1010
1011static void cpufreq_init_policy(struct cpufreq_policy *policy)
1012{
viresh kumar6e2c89d2014-03-04 11:43:59 +08001013 struct cpufreq_governor *gov = NULL;
Srivatsa S. Bhate18f1682013-07-30 04:24:23 +05301014 struct cpufreq_policy new_policy;
1015 int ret = 0;
1016
Viresh Kumard5b73cd2013-08-06 22:53:06 +05301017 memcpy(&new_policy, policy, sizeof(*policy));
Jason Barona27a9ab2013-12-19 22:50:50 +00001018
viresh kumar6e2c89d2014-03-04 11:43:59 +08001019 /* Update governor of new_policy to the governor used before hotplug */
Viresh Kumar42f91fa2015-01-02 12:34:26 +05301020 gov = find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu));
viresh kumar6e2c89d2014-03-04 11:43:59 +08001021 if (gov)
1022 pr_debug("Restoring governor %s for cpu %d\n",
1023 policy->governor->name, policy->cpu);
1024 else
1025 gov = CPUFREQ_DEFAULT_GOVERNOR;
1026
1027 new_policy.governor = gov;
1028
Jason Barona27a9ab2013-12-19 22:50:50 +00001029 /* Use the default policy if its valid. */
1030 if (cpufreq_driver->setpolicy)
viresh kumar6e2c89d2014-03-04 11:43:59 +08001031 cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);
Dave Jonesecf7e462009-07-08 18:48:47 -04001032
1033 /* set default policy */
Viresh Kumar037ce832013-10-02 14:13:16 +05301034 ret = cpufreq_set_policy(policy, &new_policy);
Dave Jonesecf7e462009-07-08 18:48:47 -04001035 if (ret) {
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02001036 pr_debug("setting policy failed\n");
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001037 if (cpufreq_driver->exit)
1038 cpufreq_driver->exit(policy);
Dave Jonesecf7e462009-07-08 18:48:47 -04001039 }
Dave Jones909a6942009-07-08 18:05:42 -04001040}
1041
/*
 * cpufreq_add_policy_cpu - attach @cpu to an existing @policy.
 * The governor must be stopped before policy->cpus is changed and
 * restarted (plus re-limited) afterwards; only then is the sysfs
 * symlink for the new CPU created.
 */
static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
				  unsigned int cpu, struct device *dev)
{
	int ret = 0;

	/* Has this CPU been taken care of already? */
	if (cpumask_test_cpu(cpu, policy->cpus))
		return 0;

	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret) {
			pr_err("%s: Failed to stop governor\n", __func__);
			return ret;
		}
	}

	/* policy->cpus is read under the same rwsem by the governors */
	down_write(&policy->rwsem);
	cpumask_set_cpu(cpu, policy->cpus);
	up_write(&policy->rwsem);

	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
		if (!ret)
			ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

		if (ret) {
			pr_err("%s: Failed to start governor\n", __func__);
			return ret;
		}
	}

	return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001076
/*
 * cpufreq_policy_restore - fetch the inactive policy kept for @cpu across
 * suspend so it can be reused on resume instead of allocating a new one.
 * Clears policy->governor; cpufreq_init_policy() picks it again later.
 * Returns NULL if no saved policy exists.
 */
static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned long flags;

	read_lock_irqsave(&cpufreq_driver_lock, flags);
	policy = per_cpu(cpufreq_cpu_data, cpu);
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (likely(policy)) {
		/* Policy should be inactive here */
		WARN_ON(!policy_is_inactive(policy));
		policy->governor = NULL;
	}

	return policy;
}
1094
/*
 * cpufreq_policy_alloc - allocate and minimally initialize a policy:
 * zeroed struct, both cpumasks, and the embedded locks/completions/work.
 * Returns NULL on allocation failure (partial allocations are unwound).
 */
static struct cpufreq_policy *cpufreq_policy_alloc(void)
{
	struct cpufreq_policy *policy;

	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
	if (!policy)
		return NULL;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	INIT_LIST_HEAD(&policy->policy_list);
	init_rwsem(&policy->rwsem);
	spin_lock_init(&policy->transition_lock);
	init_waitqueue_head(&policy->transition_wait);
	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	return policy;

err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);

	return NULL;
}
1125
/*
 * cpufreq_policy_put_kobj - notify listeners, drop the policy kobject and
 * block until its release callback fires (i.e. no sysfs reference is left).
 * The kobj/completion pointers are snapshotted under the rwsem; the wait
 * itself must happen unlocked since release completes from kobject_put().
 */
static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
{
	struct kobject *kobj;
	struct completion *cmp;

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_REMOVE_POLICY, policy);

	down_read(&policy->rwsem);
	kobj = &policy->kobj;
	cmp = &policy->kobj_unregister;
	up_read(&policy->rwsem);
	kobject_put(kobj);

	/*
	 * We need to make sure that the underlying kobj is
	 * actually not referenced anymore by anybody before we
	 * proceed with unloading.
	 */
	pr_debug("waiting for dropping of refcount\n");
	wait_for_completion(cmp);
	pr_debug("wait complete\n");
}
1149
/*
 * cpufreq_policy_free - unlink the policy from the global list, clear all
 * per-CPU back-pointers (both under cpufreq_driver_lock) and free memory.
 * Caller must guarantee nothing references the policy any more.
 */
static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
	unsigned long flags;
	int cpu;

	/* Remove policy from list */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	list_del(&policy->policy_list);

	for_each_cpu(cpu, policy->related_cpus)
		per_cpu(cpufreq_cpu_data, cpu) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	free_cpumask_var(policy->related_cpus);
	free_cpumask_var(policy->cpus);
	kfree(policy);
}
1167
/*
 * update_policy_cpu - make @cpu the owner of @policy: relocate the policy
 * kobject under the new CPU's sysfs directory, then update policy->cpu
 * under the write lock. Returns 0 on success or the kobject_move() error.
 * Calling it with the current owner is a no-op (and warns).
 */
static int update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu,
			     struct device *cpu_dev)
{
	int ret;

	if (WARN_ON(cpu == policy->cpu))
		return 0;

	/* Move kobject to the new policy->cpu */
	ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
	if (ret) {
		pr_err("%s: Failed to move kobj: %d\n", __func__, ret);
		return ret;
	}

	down_write(&policy->rwsem);
	policy->cpu = cpu;
	up_write(&policy->rwsem);

	return 0;
}
1189
/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device.
 *
 * The Oracle says: try running cpufreq registration/unregistration concurrently
 * with with cpu hotplugging and all hell will break loose. Tried to clean this
 * mess up, but more thorough testing is needed. - Mathieu
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int j, cpu = dev->id;
	int ret = -ENOMEM;
	struct cpufreq_policy *policy;
	unsigned long flags;
	/* light-weight init: reuse the policy saved across suspend */
	bool recover_policy = cpufreq_suspended;

	if (cpu_is_offline(cpu))
		return 0;

	pr_debug("adding CPU %u\n", cpu);

	/* trylock: bail out quietly if the driver is (un)registering */
	if (!down_read_trylock(&cpufreq_rwsem))
		return 0;

	/* Check if this CPU already has a policy to manage it */
	policy = per_cpu(cpufreq_cpu_data, cpu);
	if (policy && !policy_is_inactive(policy)) {
		WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
		ret = cpufreq_add_policy_cpu(policy, cpu, dev);
		up_read(&cpufreq_rwsem);
		return ret;
	}

	/*
	 * Restore the saved policy when doing light-weight init and fall back
	 * to the full init if that fails.
	 */
	policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
	if (!policy) {
		recover_policy = false;
		policy = cpufreq_policy_alloc();
		if (!policy)
			goto nomem_out;
	}

	/*
	 * In the resume path, since we restore a saved policy, the assignment
	 * to policy->cpu is like an update of the existing policy, rather than
	 * the creation of a brand new one. So we need to perform this update
	 * by invoking update_policy_cpu().
	 */
	if (recover_policy && cpu != policy->cpu)
		WARN_ON(update_policy_cpu(policy, cpu, dev));
	else
		policy->cpu = cpu;

	cpumask_copy(policy->cpus, cpumask_of(cpu));

	/* call driver. From then on the cpufreq must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU
	 */
	ret = cpufreq_driver->init(policy);
	if (ret) {
		pr_debug("initialization failed\n");
		goto err_set_policy_cpu;
	}

	down_write(&policy->rwsem);

	/* related cpus should atleast have policy->cpus */
	cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);

	/*
	 * affected cpus must always be the one, which are online. We aren't
	 * managing offline cpus here.
	 */
	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

	if (!recover_policy) {
		policy->user_policy.min = policy->min;
		policy->user_policy.max = policy->max;

		/* prepare interface data */
		ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
					   &dev->kobj, "cpufreq");
		if (ret) {
			pr_err("%s: failed to init policy->kobj: %d\n",
			       __func__, ret);
			goto err_init_policy_kobj;
		}

		/* publish the policy for all related CPUs */
		write_lock_irqsave(&cpufreq_driver_lock, flags);
		for_each_cpu(j, policy->related_cpus)
			per_cpu(cpufreq_cpu_data, j) = policy;
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	}

	/* setpolicy drivers manage 'cur' themselves */
	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
		policy->cur = cpufreq_driver->get(policy->cpu);
		if (!policy->cur) {
			pr_err("%s: ->get() failed\n", __func__);
			goto err_get_freq;
		}
	}

	/*
	 * Sometimes boot loaders set CPU frequency to a value outside of
	 * frequency table present with cpufreq core. In such cases CPU might be
	 * unstable if it has to run on that frequency for long duration of time
	 * and so its better to set it to a frequency which is specified in
	 * freq-table. This also makes cpufreq stats inconsistent as
	 * cpufreq-stats would fail to register because current frequency of CPU
	 * isn't found in freq-table.
	 *
	 * Because we don't want this change to effect boot process badly, we go
	 * for the next freq which is >= policy->cur ('cur' must be set by now,
	 * otherwise we will end up setting freq to lowest of the table as 'cur'
	 * is initialized to zero).
	 *
	 * We are passing target-freq as "policy->cur - 1" otherwise
	 * __cpufreq_driver_target() would simply fail, as policy->cur will be
	 * equal to target-freq.
	 */
	if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
	    && has_target()) {
		/* Are we running at unknown frequency ? */
		ret = cpufreq_frequency_table_get_index(policy, policy->cur);
		if (ret == -EINVAL) {
			/* Warn user and fix it */
			pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
				__func__, policy->cpu, policy->cur);
			ret = __cpufreq_driver_target(policy, policy->cur - 1,
				CPUFREQ_RELATION_L);

			/*
			 * Reaching here after boot in a few seconds may not
			 * mean that system will remain stable at "unknown"
			 * frequency for longer duration. Hence, a BUG_ON().
			 */
			BUG_ON(ret);
			pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
				__func__, policy->cpu, policy->cur);
		}
	}

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_START, policy);

	if (!recover_policy) {
		ret = cpufreq_add_dev_interface(policy, dev);
		if (ret)
			goto err_out_unregister;
		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				CPUFREQ_CREATE_POLICY, policy);

		write_lock_irqsave(&cpufreq_driver_lock, flags);
		list_add(&policy->policy_list, &cpufreq_policy_list);
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	}

	cpufreq_init_policy(policy);

	if (!recover_policy) {
		policy->user_policy.policy = policy->policy;
		policy->user_policy.governor = policy->governor;
	}
	up_write(&policy->rwsem);

	kobject_uevent(&policy->kobj, KOBJ_ADD);

	up_read(&cpufreq_rwsem);

	/* Callback for handling stuff after policy is ready */
	if (cpufreq_driver->ready)
		cpufreq_driver->ready(policy);

	pr_debug("initialization complete\n");

	return 0;

err_out_unregister:
err_get_freq:
	if (!recover_policy) {
		kobject_put(&policy->kobj);
		wait_for_completion(&policy->kobj_unregister);
	}
err_init_policy_kobj:
	up_write(&policy->rwsem);

	if (cpufreq_driver->exit)
		cpufreq_driver->exit(policy);
err_set_policy_cpu:
	/* restored policies still own a live kobject; drop it first */
	if (recover_policy)
		cpufreq_policy_put_kobj(policy);
	cpufreq_policy_free(policy);

nomem_out:
	up_read(&cpufreq_rwsem);

	return ret;
}
1392
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301393static int __cpufreq_remove_dev_prepare(struct device *dev,
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301394 struct subsys_interface *sif)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001395{
Srivatsa S. Bhatf9ba6802013-07-30 04:24:36 +05301396 unsigned int cpu = dev->id, cpus;
Viresh Kumar1bfb4252014-07-17 10:48:28 +05301397 int ret;
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301398 struct cpufreq_policy *policy;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001399
Viresh Kumarb8eed8a2013-01-14 13:23:03 +00001400 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001401
Viresh Kumar988bed02015-05-08 11:53:45 +05301402 policy = cpufreq_cpu_get_raw(cpu);
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301403 if (!policy) {
Viresh Kumarb8eed8a2013-01-14 13:23:03 +00001404 pr_debug("%s: No cpu_data found\n", __func__);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001405 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001406 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001407
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +05301408 if (has_target()) {
Viresh Kumar3de9bde2013-08-06 22:53:13 +05301409 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1410 if (ret) {
1411 pr_err("%s: Failed to stop governor\n", __func__);
1412 return ret;
1413 }
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001414
Dirk Brandewiefa69e332013-02-06 09:02:11 -08001415 strncpy(per_cpu(cpufreq_cpu_governor, cpu),
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301416 policy->governor->name, CPUFREQ_NAME_LEN);
Viresh Kumardb5f2992015-01-02 12:34:25 +05301417 }
Jacob Shin27ecddc2011-04-27 13:32:11 -05001418
viresh kumarad7722d2013-10-18 19:10:15 +05301419 down_read(&policy->rwsem);
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301420 cpus = cpumask_weight(policy->cpus);
viresh kumarad7722d2013-10-18 19:10:15 +05301421 up_read(&policy->rwsem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001422
Srivatsa S. Bhat61173f22013-09-12 01:43:25 +05301423 if (cpu != policy->cpu) {
viresh kumar6964d912014-02-17 14:52:11 +05301424 sysfs_remove_link(&dev->kobj, "cpufreq");
Viresh Kumar73bf0fc2013-02-05 22:21:14 +01001425 } else if (cpus > 1) {
Viresh Kumar1bfb4252014-07-17 10:48:28 +05301426 /* Nominate new CPU */
1427 int new_cpu = cpumask_any_but(policy->cpus, cpu);
1428 struct device *cpu_dev = get_cpu_device(new_cpu);
Srivatsa S. Bhata82fab22013-07-30 04:24:49 +05301429
Viresh Kumar1bfb4252014-07-17 10:48:28 +05301430 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
1431 ret = update_policy_cpu(policy, new_cpu, cpu_dev);
1432 if (ret) {
1433 if (sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
1434 "cpufreq"))
1435 pr_err("%s: Failed to restore kobj link to cpu:%d\n",
1436 __func__, cpu_dev->id);
1437 return ret;
Viresh Kumarb8eed8a2013-01-14 13:23:03 +00001438 }
Viresh Kumar1bfb4252014-07-17 10:48:28 +05301439
1440 if (!cpufreq_suspended)
1441 pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
1442 __func__, new_cpu, cpu);
Preeti U Murthy789ca242014-09-29 15:47:12 +02001443 } else if (cpufreq_driver->stop_cpu) {
Dirk Brandewie367dc4a2014-03-19 08:45:53 -07001444 cpufreq_driver->stop_cpu(policy);
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001445 }
Venki Pallipadiec282972007-03-26 12:03:19 -07001446
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301447 return 0;
1448}
1449
/*
 * __cpufreq_remove_dev_finish - second phase of CPU removal.
 *
 * Drops @dev's CPU from its policy's cpus mask.  If that leaves the policy
 * with no active CPUs, tears the policy down (governor POLICY_EXIT, kobject
 * release, driver ->exit(), and finally freeing the policy); otherwise
 * restarts the governor on the remaining CPUs.
 *
 * Teardown is skipped where noted while cpufreq_suspended is set, so a
 * suspend/resume cycle can do a light-weight re-init.
 */
static int __cpufreq_remove_dev_finish(struct device *dev,
					struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	int ret;
	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);

	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	/* Remove this CPU from the policy under the write lock */
	down_write(&policy->rwsem);
	cpumask_clear_cpu(cpu, policy->cpus);
	up_write(&policy->rwsem);

	/* If cpu is last user of policy, free policy */
	if (policy_is_inactive(policy)) {
		if (has_target()) {
			ret = __cpufreq_governor(policy,
					CPUFREQ_GOV_POLICY_EXIT);
			if (ret) {
				pr_err("%s: Failed to exit governor\n",
				       __func__);
				return ret;
			}
		}

		/* Keep the kobject alive across suspend for fast resume */
		if (!cpufreq_suspended)
			cpufreq_policy_put_kobj(policy);

		/*
		 * Perform the ->exit() even during light-weight tear-down,
		 * since this is a core component, and is essential for the
		 * subsequent light-weight ->init() to succeed.
		 */
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);

		if (!cpufreq_suspended)
			cpufreq_policy_free(policy);
	} else if (has_target()) {
		/* Other CPUs remain: restart the governor for them */
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
		if (!ret)
			ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

		if (ret) {
			pr_err("%s: Failed to start governor\n", __func__);
			return ret;
		}
	}

	return 0;
}
1504
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301505/**
Viresh Kumar27a862e2013-10-02 14:13:14 +05301506 * cpufreq_remove_dev - remove a CPU device
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301507 *
1508 * Removes the cpufreq interface for a CPU device.
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301509 */
Kay Sievers8a25a2f2011-12-21 14:29:42 -08001510static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001511{
Kay Sievers8a25a2f2011-12-21 14:29:42 -08001512 unsigned int cpu = dev->id;
Viresh Kumar27a862e2013-10-02 14:13:14 +05301513 int ret;
Venki Pallipadiec282972007-03-26 12:03:19 -07001514
1515 if (cpu_is_offline(cpu))
1516 return 0;
1517
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301518 ret = __cpufreq_remove_dev_prepare(dev, sif);
Viresh Kumar27a862e2013-10-02 14:13:14 +05301519
1520 if (!ret)
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301521 ret = __cpufreq_remove_dev_finish(dev, sif);
Viresh Kumar27a862e2013-10-02 14:13:14 +05301522
1523 return ret;
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001524}
1525
David Howells65f27f32006-11-22 14:55:48 +00001526static void handle_update(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001527{
David Howells65f27f32006-11-22 14:55:48 +00001528 struct cpufreq_policy *policy =
1529 container_of(work, struct cpufreq_policy, update);
1530 unsigned int cpu = policy->cpu;
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02001531 pr_debug("handle_update for cpu %u called\n", cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001532 cpufreq_update_policy(cpu);
1533}
1534
/**
 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
 *	in deep trouble.
 * @policy: policy managing CPUs
 * @new_freq: CPU frequency the CPU actually runs at
 *
 * We adjust to current frequency first, and need to clean up later.
 * So either call to cpufreq_update_policy() or schedule handle_update()).
 *
 * Issues a zero-failure transition notification (begin + end) so that
 * policy->cur and all transition listeners are brought in line with the
 * hardware.
 */
static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
				unsigned int new_freq)
{
	struct cpufreq_freqs freqs;

	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
		 policy->cur, new_freq);

	/* Fake a transition from the stale cached value to the real one */
	freqs.old = policy->cur;
	freqs.new = new_freq;

	cpufreq_freq_transition_begin(policy, &freqs);
	cpufreq_freq_transition_end(policy, &freqs, 0);
}
1558
Dave Jones32ee8c32006-02-28 00:43:23 -05001559/**
Dhaval Giani4ab70df2006-12-13 14:49:15 +05301560 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
Venkatesh Pallipadi95235ca2005-12-02 10:43:20 -08001561 * @cpu: CPU number
1562 *
1563 * This is the last known freq, without actually getting it from the driver.
1564 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1565 */
1566unsigned int cpufreq_quick_get(unsigned int cpu)
1567{
Dirk Brandewie9e21ba82013-02-06 09:02:08 -08001568 struct cpufreq_policy *policy;
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301569 unsigned int ret_freq = 0;
Venkatesh Pallipadi95235ca2005-12-02 10:43:20 -08001570
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001571 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1572 return cpufreq_driver->get(cpu);
Dirk Brandewie9e21ba82013-02-06 09:02:08 -08001573
1574 policy = cpufreq_cpu_get(cpu);
Venkatesh Pallipadi95235ca2005-12-02 10:43:20 -08001575 if (policy) {
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301576 ret_freq = policy->cur;
Venkatesh Pallipadi95235ca2005-12-02 10:43:20 -08001577 cpufreq_cpu_put(policy);
1578 }
1579
Dave Jones4d34a672008-02-07 16:33:49 -05001580 return ret_freq;
Venkatesh Pallipadi95235ca2005-12-02 10:43:20 -08001581}
1582EXPORT_SYMBOL(cpufreq_quick_get);
1583
Jesse Barnes3d737102011-06-28 10:59:12 -07001584/**
1585 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1586 * @cpu: CPU number
1587 *
1588 * Just return the max possible frequency for a given CPU.
1589 */
1590unsigned int cpufreq_quick_get_max(unsigned int cpu)
1591{
1592 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1593 unsigned int ret_freq = 0;
1594
1595 if (policy) {
1596 ret_freq = policy->max;
1597 cpufreq_cpu_put(policy);
1598 }
1599
1600 return ret_freq;
1601}
1602EXPORT_SYMBOL(cpufreq_quick_get_max);
1603
Viresh Kumard92d50a2015-01-02 12:34:29 +05301604static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001605{
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301606 unsigned int ret_freq = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001607
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001608 if (!cpufreq_driver->get)
Dave Jones4d34a672008-02-07 16:33:49 -05001609 return ret_freq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001610
Viresh Kumard92d50a2015-01-02 12:34:29 +05301611 ret_freq = cpufreq_driver->get(policy->cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001612
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301613 if (ret_freq && policy->cur &&
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001614 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301615 /* verify no discrepancy between actual and
1616 saved value exists */
1617 if (unlikely(ret_freq != policy->cur)) {
Viresh Kumara1e1dc42015-01-02 12:34:28 +05301618 cpufreq_out_of_sync(policy, ret_freq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001619 schedule_work(&policy->update);
1620 }
1621 }
1622
Dave Jones4d34a672008-02-07 16:33:49 -05001623 return ret_freq;
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001624}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001625
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001626/**
1627 * cpufreq_get - get the current CPU frequency (in kHz)
1628 * @cpu: CPU number
1629 *
1630 * Get the CPU current (static) CPU frequency
1631 */
1632unsigned int cpufreq_get(unsigned int cpu)
1633{
Aaron Plattner999976e2014-03-04 12:42:15 -08001634 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001635 unsigned int ret_freq = 0;
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001636
Aaron Plattner999976e2014-03-04 12:42:15 -08001637 if (policy) {
1638 down_read(&policy->rwsem);
Viresh Kumard92d50a2015-01-02 12:34:29 +05301639 ret_freq = __cpufreq_get(policy);
Aaron Plattner999976e2014-03-04 12:42:15 -08001640 up_read(&policy->rwsem);
Viresh Kumar26ca8692013-09-20 22:37:31 +05301641
Aaron Plattner999976e2014-03-04 12:42:15 -08001642 cpufreq_cpu_put(policy);
1643 }
Viresh Kumar6eed9402013-08-06 22:53:11 +05301644
Dave Jones4d34a672008-02-07 16:33:49 -05001645 return ret_freq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001646}
1647EXPORT_SYMBOL(cpufreq_get);
1648
Kay Sievers8a25a2f2011-12-21 14:29:42 -08001649static struct subsys_interface cpufreq_interface = {
1650 .name = "cpufreq",
1651 .subsys = &cpu_subsys,
1652 .add_dev = cpufreq_add_dev,
1653 .remove_dev = cpufreq_remove_dev,
Rafael J. Wysockie00e56d2011-03-23 22:16:32 +01001654};
1655
Viresh Kumare28867e2014-03-04 11:00:27 +08001656/*
1657 * In case platform wants some specific frequency to be configured
1658 * during suspend..
Benjamin Herrenschmidt42d4dc32005-04-29 07:40:12 -07001659 */
Viresh Kumare28867e2014-03-04 11:00:27 +08001660int cpufreq_generic_suspend(struct cpufreq_policy *policy)
Benjamin Herrenschmidt42d4dc32005-04-29 07:40:12 -07001661{
Viresh Kumare28867e2014-03-04 11:00:27 +08001662 int ret;
Dave Jones4bc5d342009-08-04 14:03:25 -04001663
Viresh Kumare28867e2014-03-04 11:00:27 +08001664 if (!policy->suspend_freq) {
1665 pr_err("%s: suspend_freq can't be zero\n", __func__);
1666 return -EINVAL;
Benjamin Herrenschmidt42d4dc32005-04-29 07:40:12 -07001667 }
1668
Viresh Kumare28867e2014-03-04 11:00:27 +08001669 pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1670 policy->suspend_freq);
1671
1672 ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1673 CPUFREQ_RELATION_H);
1674 if (ret)
1675 pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1676 __func__, policy->suspend_freq, ret);
1677
Dave Jonesc9060492008-02-07 16:32:18 -05001678 return ret;
Benjamin Herrenschmidt42d4dc32005-04-29 07:40:12 -07001679}
Viresh Kumare28867e2014-03-04 11:00:27 +08001680EXPORT_SYMBOL(cpufreq_generic_suspend);
Benjamin Herrenschmidt42d4dc32005-04-29 07:40:12 -07001681
/**
 * cpufreq_suspend() - Suspend CPUFreq governors
 *
 * Called during system wide Suspend/Hibernate cycles for suspending governors
 * as some platforms can't change frequency after this point in suspend cycle.
 * Because some of the devices (like: i2c, regulators, etc) they use for
 * changing frequency are suspended quickly after this point.
 *
 * Note: cpufreq_suspended is set only after all governors have been stopped,
 * so governor operations keep working during the loop below and are
 * short-circuited (see __cpufreq_governor()) only afterwards.
 */
void cpufreq_suspend(void)
{
	struct cpufreq_policy *policy;

	if (!cpufreq_driver)
		return;

	/* No governors to stop; still mark the subsystem suspended */
	if (!has_target())
		goto suspend;

	pr_debug("%s: Suspending Governors\n", __func__);

	for_each_active_policy(policy) {
		if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
			pr_err("%s: Failed to stop governor for policy: %p\n",
				__func__, policy);
		else if (cpufreq_driver->suspend
		    && cpufreq_driver->suspend(policy))
			pr_err("%s: Failed to suspend driver: %p\n", __func__,
				policy);
	}

suspend:
	cpufreq_suspended = true;
}
1715
Linus Torvalds1da177e2005-04-16 15:20:36 -07001716/**
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001717 * cpufreq_resume() - Resume CPUFreq governors
Linus Torvalds1da177e2005-04-16 15:20:36 -07001718 *
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001719 * Called during system wide Suspend/Hibernate cycle for resuming governors that
1720 * are suspended with cpufreq_suspend().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001721 */
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001722void cpufreq_resume(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001723{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001724 struct cpufreq_policy *policy;
1725
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001726 if (!cpufreq_driver)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001727 return;
1728
Lan Tianyu8e304442014-09-18 15:03:07 +08001729 cpufreq_suspended = false;
1730
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001731 if (!has_target())
1732 return;
1733
1734 pr_debug("%s: Resuming Governors\n", __func__);
1735
Viresh Kumarf9637352015-05-12 12:20:11 +05301736 for_each_active_policy(policy) {
Viresh Kumar0c5aa402014-03-24 12:30:29 +05301737 if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
1738 pr_err("%s: Failed to resume driver: %p\n", __func__,
1739 policy);
1740 else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001741 || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
1742 pr_err("%s: Failed to start governor for policy: %p\n",
1743 __func__, policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001744 }
Viresh Kumarc75de0a2015-04-02 10:21:33 +05301745
1746 /*
1747 * schedule call cpufreq_update_policy() for first-online CPU, as that
1748 * wouldn't be hotplugged-out on suspend. It will verify that the
1749 * current freq is in sync with what we believe it to be.
1750 */
1751 policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask));
1752 if (WARN_ON(!policy))
1753 return;
1754
1755 schedule_work(&policy->update);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001756}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001757
Borislav Petkov9d950462013-01-20 10:24:28 +00001758/**
1759 * cpufreq_get_current_driver - return current driver's name
1760 *
1761 * Return the name string of the currently loaded cpufreq driver
1762 * or NULL, if none.
1763 */
1764const char *cpufreq_get_current_driver(void)
1765{
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001766 if (cpufreq_driver)
1767 return cpufreq_driver->name;
1768
1769 return NULL;
Borislav Petkov9d950462013-01-20 10:24:28 +00001770}
1771EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001772
Thomas Petazzoni51315cd2014-10-19 11:30:27 +02001773/**
1774 * cpufreq_get_driver_data - return current driver data
1775 *
1776 * Return the private data of the currently loaded cpufreq
1777 * driver, or NULL if no cpufreq driver is loaded.
1778 */
1779void *cpufreq_get_driver_data(void)
1780{
1781 if (cpufreq_driver)
1782 return cpufreq_driver->driver_data;
1783
1784 return NULL;
1785}
1786EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
1787
Linus Torvalds1da177e2005-04-16 15:20:36 -07001788/*********************************************************************
1789 * NOTIFIER LISTS INTERFACE *
1790 *********************************************************************/
1791
1792/**
1793 * cpufreq_register_notifier - register a driver with cpufreq
1794 * @nb: notifier function to register
1795 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1796 *
Dave Jones32ee8c32006-02-28 00:43:23 -05001797 * Add a driver to one of two lists: either a list of drivers that
Linus Torvalds1da177e2005-04-16 15:20:36 -07001798 * are notified about clock rate changes (once before and once after
1799 * the transition), or a list of drivers that are notified about
1800 * changes in cpufreq policy.
1801 *
1802 * This function may sleep, and has the same return conditions as
Alan Sterne041c682006-03-27 01:16:30 -08001803 * blocking_notifier_chain_register.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001804 */
1805int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1806{
1807 int ret;
1808
Dirk Brandewied5aaffa2013-01-17 16:22:21 +00001809 if (cpufreq_disabled())
1810 return -EINVAL;
1811
Cesar Eduardo Barros74212ca2008-02-16 08:41:24 -02001812 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1813
Linus Torvalds1da177e2005-04-16 15:20:36 -07001814 switch (list) {
1815 case CPUFREQ_TRANSITION_NOTIFIER:
Alan Sternb4dfdbb2006-10-04 02:17:06 -07001816 ret = srcu_notifier_chain_register(
Alan Sterne041c682006-03-27 01:16:30 -08001817 &cpufreq_transition_notifier_list, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001818 break;
1819 case CPUFREQ_POLICY_NOTIFIER:
Alan Sterne041c682006-03-27 01:16:30 -08001820 ret = blocking_notifier_chain_register(
1821 &cpufreq_policy_notifier_list, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001822 break;
1823 default:
1824 ret = -EINVAL;
1825 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001826
1827 return ret;
1828}
1829EXPORT_SYMBOL(cpufreq_register_notifier);
1830
Linus Torvalds1da177e2005-04-16 15:20:36 -07001831/**
1832 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1833 * @nb: notifier block to be unregistered
Viresh Kumarbb176f72013-06-19 14:19:33 +05301834 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
Linus Torvalds1da177e2005-04-16 15:20:36 -07001835 *
1836 * Remove a driver from the CPU frequency notifier list.
1837 *
1838 * This function may sleep, and has the same return conditions as
Alan Sterne041c682006-03-27 01:16:30 -08001839 * blocking_notifier_chain_unregister.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001840 */
1841int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1842{
1843 int ret;
1844
Dirk Brandewied5aaffa2013-01-17 16:22:21 +00001845 if (cpufreq_disabled())
1846 return -EINVAL;
1847
Linus Torvalds1da177e2005-04-16 15:20:36 -07001848 switch (list) {
1849 case CPUFREQ_TRANSITION_NOTIFIER:
Alan Sternb4dfdbb2006-10-04 02:17:06 -07001850 ret = srcu_notifier_chain_unregister(
Alan Sterne041c682006-03-27 01:16:30 -08001851 &cpufreq_transition_notifier_list, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001852 break;
1853 case CPUFREQ_POLICY_NOTIFIER:
Alan Sterne041c682006-03-27 01:16:30 -08001854 ret = blocking_notifier_chain_unregister(
1855 &cpufreq_policy_notifier_list, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001856 break;
1857 default:
1858 ret = -EINVAL;
1859 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001860
1861 return ret;
1862}
1863EXPORT_SYMBOL(cpufreq_unregister_notifier);
1864
1865
1866/*********************************************************************
1867 * GOVERNORS *
1868 *********************************************************************/
1869
Viresh Kumar1c03a2d2014-06-02 22:49:28 +05301870/* Must set freqs->new to intermediate frequency */
1871static int __target_intermediate(struct cpufreq_policy *policy,
1872 struct cpufreq_freqs *freqs, int index)
1873{
1874 int ret;
1875
1876 freqs->new = cpufreq_driver->get_intermediate(policy, index);
1877
1878 /* We don't need to switch to intermediate freq */
1879 if (!freqs->new)
1880 return 0;
1881
1882 pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
1883 __func__, policy->cpu, freqs->old, freqs->new);
1884
1885 cpufreq_freq_transition_begin(policy, freqs);
1886 ret = cpufreq_driver->target_intermediate(policy, index);
1887 cpufreq_freq_transition_end(policy, freqs, ret);
1888
1889 if (ret)
1890 pr_err("%s: Failed to change to intermediate frequency: %d\n",
1891 __func__, ret);
1892
1893 return ret;
1894}
1895
Viresh Kumar8d657752014-05-21 14:29:29 +05301896static int __target_index(struct cpufreq_policy *policy,
1897 struct cpufreq_frequency_table *freq_table, int index)
1898{
Viresh Kumar1c03a2d2014-06-02 22:49:28 +05301899 struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
1900 unsigned int intermediate_freq = 0;
Viresh Kumar8d657752014-05-21 14:29:29 +05301901 int retval = -EINVAL;
1902 bool notify;
1903
1904 notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
Viresh Kumar8d657752014-05-21 14:29:29 +05301905 if (notify) {
Viresh Kumar1c03a2d2014-06-02 22:49:28 +05301906 /* Handle switching to intermediate frequency */
1907 if (cpufreq_driver->get_intermediate) {
1908 retval = __target_intermediate(policy, &freqs, index);
1909 if (retval)
1910 return retval;
Viresh Kumar8d657752014-05-21 14:29:29 +05301911
Viresh Kumar1c03a2d2014-06-02 22:49:28 +05301912 intermediate_freq = freqs.new;
1913 /* Set old freq to intermediate */
1914 if (intermediate_freq)
1915 freqs.old = freqs.new;
1916 }
1917
1918 freqs.new = freq_table[index].frequency;
Viresh Kumar8d657752014-05-21 14:29:29 +05301919 pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
1920 __func__, policy->cpu, freqs.old, freqs.new);
1921
1922 cpufreq_freq_transition_begin(policy, &freqs);
1923 }
1924
1925 retval = cpufreq_driver->target_index(policy, index);
1926 if (retval)
1927 pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
1928 retval);
1929
Viresh Kumar1c03a2d2014-06-02 22:49:28 +05301930 if (notify) {
Viresh Kumar8d657752014-05-21 14:29:29 +05301931 cpufreq_freq_transition_end(policy, &freqs, retval);
1932
Viresh Kumar1c03a2d2014-06-02 22:49:28 +05301933 /*
1934 * Failed after setting to intermediate freq? Driver should have
1935 * reverted back to initial frequency and so should we. Check
1936 * here for intermediate_freq instead of get_intermediate, in
1937 * case we have't switched to intermediate freq at all.
1938 */
1939 if (unlikely(retval && intermediate_freq)) {
1940 freqs.old = intermediate_freq;
1941 freqs.new = policy->restore_freq;
1942 cpufreq_freq_transition_begin(policy, &freqs);
1943 cpufreq_freq_transition_end(policy, &freqs, 0);
1944 }
1945 }
1946
Viresh Kumar8d657752014-05-21 14:29:29 +05301947 return retval;
1948}
1949
Linus Torvalds1da177e2005-04-16 15:20:36 -07001950int __cpufreq_driver_target(struct cpufreq_policy *policy,
1951 unsigned int target_freq,
1952 unsigned int relation)
1953{
Viresh Kumar72499242012-10-31 01:28:21 +01001954 unsigned int old_target_freq = target_freq;
Viresh Kumar8d657752014-05-21 14:29:29 +05301955 int retval = -EINVAL;
Ashok Rajc32b6b82005-10-30 14:59:54 -08001956
Konrad Rzeszutek Wilka7b422c2012-03-13 19:18:39 -04001957 if (cpufreq_disabled())
1958 return -ENODEV;
1959
Viresh Kumar72499242012-10-31 01:28:21 +01001960 /* Make sure that target_freq is within supported range */
1961 if (target_freq > policy->max)
1962 target_freq = policy->max;
1963 if (target_freq < policy->min)
1964 target_freq = policy->min;
1965
1966 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
Joe Perchese837f9b2014-03-11 10:03:00 -07001967 policy->cpu, target_freq, relation, old_target_freq);
Viresh Kumar5a1c0222012-10-31 01:28:15 +01001968
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +05301969 /*
1970 * This might look like a redundant call as we are checking it again
1971 * after finding index. But it is left intentionally for cases where
1972 * exactly same freq is called again and so we can save on few function
1973 * calls.
1974 */
Viresh Kumar5a1c0222012-10-31 01:28:15 +01001975 if (target_freq == policy->cur)
1976 return 0;
1977
Viresh Kumar1c03a2d2014-06-02 22:49:28 +05301978 /* Save last value to restore later on errors */
1979 policy->restore_freq = policy->cur;
1980
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001981 if (cpufreq_driver->target)
1982 retval = cpufreq_driver->target(policy, target_freq, relation);
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +05301983 else if (cpufreq_driver->target_index) {
1984 struct cpufreq_frequency_table *freq_table;
1985 int index;
Ashok Raj90d45d12005-11-08 21:34:24 -08001986
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +05301987 freq_table = cpufreq_frequency_get_table(policy->cpu);
1988 if (unlikely(!freq_table)) {
1989 pr_err("%s: Unable to find freq_table\n", __func__);
1990 goto out;
1991 }
1992
1993 retval = cpufreq_frequency_table_target(policy, freq_table,
1994 target_freq, relation, &index);
1995 if (unlikely(retval)) {
1996 pr_err("%s: Unable to find matching freq\n", __func__);
1997 goto out;
1998 }
1999
Viresh Kumard4019f02013-08-14 19:38:24 +05302000 if (freq_table[index].frequency == policy->cur) {
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +05302001 retval = 0;
Viresh Kumard4019f02013-08-14 19:38:24 +05302002 goto out;
2003 }
2004
Viresh Kumar8d657752014-05-21 14:29:29 +05302005 retval = __target_index(policy, freq_table, index);
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +05302006 }
2007
2008out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002009 return retval;
2010}
2011EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
2012
Linus Torvalds1da177e2005-04-16 15:20:36 -07002013int cpufreq_driver_target(struct cpufreq_policy *policy,
2014 unsigned int target_freq,
2015 unsigned int relation)
2016{
Julia Lawallf1829e42008-07-25 22:44:53 +02002017 int ret = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002018
viresh kumarad7722d2013-10-18 19:10:15 +05302019 down_write(&policy->rwsem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002020
2021 ret = __cpufreq_driver_target(policy, target_freq, relation);
2022
viresh kumarad7722d2013-10-18 19:10:15 +05302023 up_write(&policy->rwsem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002024
Linus Torvalds1da177e2005-04-16 15:20:36 -07002025 return ret;
2026}
2027EXPORT_SYMBOL_GPL(cpufreq_driver_target);
2028
/*
 * __cpufreq_governor - deliver a governor event to @policy's governor.
 * @policy: policy whose governor callback is invoked.
 * @event: CPUFREQ_GOV_POLICY_INIT/EXIT, CPUFREQ_GOV_START/STOP or
 *	CPUFREQ_GOV_LIMITS.
 *
 * Tracks policy->governor_enabled under cpufreq_governor_lock so that
 * redundant START/STOP transitions are rejected with -EBUSY, and rolls
 * the flag back if the governor callback fails.  Returns 0 on success
 * or a negative error code.
 */
static int __cpufreq_governor(struct cpufreq_policy *policy,
					unsigned int event)
{
	int ret;

	/* Only must be defined when default governor is known to have latency
	   restrictions, like e.g. conservative or ondemand.
	   That this is the case is already ensured in Kconfig
	 */
#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
	struct cpufreq_governor *gov = &cpufreq_gov_performance;
#else
	struct cpufreq_governor *gov = NULL;
#endif

	/* Don't start any governor operations if we are entering suspend */
	if (cpufreq_suspended)
		return 0;
	/*
	 * Governor might not be initiated here if ACPI _PPC changed
	 * notification happened, so check it.
	 */
	if (!policy->governor)
		return -EINVAL;

	/*
	 * Fall back to the performance governor (when built in) if the
	 * requested governor cannot cope with this hardware's transition
	 * latency; without a fallback the request fails.
	 */
	if (policy->governor->max_transition_latency &&
	    policy->cpuinfo.transition_latency >
	    policy->governor->max_transition_latency) {
		if (!gov)
			return -EINVAL;
		else {
			pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
				policy->governor->name, gov->name);
			policy->governor = gov;
		}
	}

	/* Pin the governor module across the INIT..EXIT lifetime. */
	if (event == CPUFREQ_GOV_POLICY_INIT)
		if (!try_module_get(policy->governor->owner))
			return -EINVAL;

	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
		 policy->cpu, event);

	/*
	 * Reject invalid transitions: START while already started, or
	 * STOP/LIMITS while the governor is not running.
	 */
	mutex_lock(&cpufreq_governor_lock);
	if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
	    || (!policy->governor_enabled
	    && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
		mutex_unlock(&cpufreq_governor_lock);
		return -EBUSY;
	}

	/* Flip the enabled flag optimistically before calling the governor. */
	if (event == CPUFREQ_GOV_STOP)
		policy->governor_enabled = false;
	else if (event == CPUFREQ_GOV_START)
		policy->governor_enabled = true;

	mutex_unlock(&cpufreq_governor_lock);

	ret = policy->governor->governor(policy, event);

	if (!ret) {
		if (event == CPUFREQ_GOV_POLICY_INIT)
			policy->governor->initialized++;
		else if (event == CPUFREQ_GOV_POLICY_EXIT)
			policy->governor->initialized--;
	} else {
		/* Restore original values */
		mutex_lock(&cpufreq_governor_lock);
		if (event == CPUFREQ_GOV_STOP)
			policy->governor_enabled = true;
		else if (event == CPUFREQ_GOV_START)
			policy->governor_enabled = false;
		mutex_unlock(&cpufreq_governor_lock);
	}

	/* Drop the module reference on INIT failure or successful EXIT. */
	if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
			((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
		module_put(policy->governor->owner);

	return ret;
}
2111
Linus Torvalds1da177e2005-04-16 15:20:36 -07002112int cpufreq_register_governor(struct cpufreq_governor *governor)
2113{
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -07002114 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002115
2116 if (!governor)
2117 return -EINVAL;
2118
Konrad Rzeszutek Wilka7b422c2012-03-13 19:18:39 -04002119 if (cpufreq_disabled())
2120 return -ENODEV;
2121
akpm@osdl.org3fc54d32006-01-13 15:54:22 -08002122 mutex_lock(&cpufreq_governor_mutex);
Dave Jones32ee8c32006-02-28 00:43:23 -05002123
Viresh Kumarb3940582013-02-01 05:42:58 +00002124 governor->initialized = 0;
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -07002125 err = -EBUSY;
Viresh Kumar42f91fa2015-01-02 12:34:26 +05302126 if (!find_governor(governor->name)) {
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -07002127 err = 0;
2128 list_add(&governor->governor_list, &cpufreq_governor_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002129 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002130
Dave Jones32ee8c32006-02-28 00:43:23 -05002131 mutex_unlock(&cpufreq_governor_mutex);
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -07002132 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002133}
2134EXPORT_SYMBOL_GPL(cpufreq_register_governor);
2135
Linus Torvalds1da177e2005-04-16 15:20:36 -07002136void cpufreq_unregister_governor(struct cpufreq_governor *governor)
2137{
Prarit Bhargava90e41ba2009-11-12 09:18:46 -05002138 int cpu;
Prarit Bhargava90e41ba2009-11-12 09:18:46 -05002139
Linus Torvalds1da177e2005-04-16 15:20:36 -07002140 if (!governor)
2141 return;
2142
Konrad Rzeszutek Wilka7b422c2012-03-13 19:18:39 -04002143 if (cpufreq_disabled())
2144 return;
2145
Prarit Bhargava90e41ba2009-11-12 09:18:46 -05002146 for_each_present_cpu(cpu) {
2147 if (cpu_online(cpu))
2148 continue;
2149 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
2150 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
2151 }
Prarit Bhargava90e41ba2009-11-12 09:18:46 -05002152
akpm@osdl.org3fc54d32006-01-13 15:54:22 -08002153 mutex_lock(&cpufreq_governor_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002154 list_del(&governor->governor_list);
akpm@osdl.org3fc54d32006-01-13 15:54:22 -08002155 mutex_unlock(&cpufreq_governor_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002156 return;
2157}
2158EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
2159
2160
Linus Torvalds1da177e2005-04-16 15:20:36 -07002161/*********************************************************************
2162 * POLICY INTERFACE *
2163 *********************************************************************/
2164
2165/**
2166 * cpufreq_get_policy - get the current cpufreq_policy
Dave Jones29464f22009-01-18 01:37:11 -05002167 * @policy: struct cpufreq_policy into which the current cpufreq_policy
2168 * is written
Linus Torvalds1da177e2005-04-16 15:20:36 -07002169 *
2170 * Reads the current cpufreq policy.
2171 */
2172int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2173{
2174 struct cpufreq_policy *cpu_policy;
2175 if (!policy)
2176 return -EINVAL;
2177
2178 cpu_policy = cpufreq_cpu_get(cpu);
2179 if (!cpu_policy)
2180 return -EINVAL;
2181
Viresh Kumard5b73cd2013-08-06 22:53:06 +05302182 memcpy(policy, cpu_policy, sizeof(*policy));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002183
2184 cpufreq_cpu_put(cpu_policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002185 return 0;
2186}
2187EXPORT_SYMBOL(cpufreq_get_policy);
2188
/*
 * cpufreq_set_policy - apply new limits and/or a new governor to a policy.
 * policy : current policy.
 * new_policy: policy to be set.
 *
 * Verifies @new_policy against the driver, lets policy notifiers adjust
 * it, installs the new min/max, and then either hands the policy to a
 * ->setpolicy driver or performs a governor switch.  Returns 0 on
 * success or a negative error code.  NOTE(review): the governor-switch
 * path temporarily drops and re-takes policy->rwsem, so the caller is
 * expected to hold it (see cpufreq_update_policy()).
 */
static int cpufreq_set_policy(struct cpufreq_policy *policy,
				struct cpufreq_policy *new_policy)
{
	struct cpufreq_governor *old_gov;
	int ret;

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
		 new_policy->cpu, new_policy->min, new_policy->max);

	memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));

	/* Reject ranges that do not overlap the current limits. */
	if (new_policy->min > policy->max || new_policy->max < policy->min)
		return -EINVAL;

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		return ret;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, new_policy);

	/* adjust if necessary - hardware incompatibility*/
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_INCOMPATIBLE, new_policy);

	/*
	 * verify the cpu speed can be set within this limit, which might be
	 * different to the first one
	 */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		return ret;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, new_policy);

	policy->min = new_policy->min;
	policy->max = new_policy->max;

	pr_debug("new min and max freqs are %u - %u kHz\n",
		 policy->min, policy->max);

	if (cpufreq_driver->setpolicy) {
		/* ->setpolicy drivers handle the policy directly; no governor. */
		policy->policy = new_policy->policy;
		pr_debug("setting range\n");
		return cpufreq_driver->setpolicy(new_policy);
	}

	/* Same governor as before: only the limits need propagating. */
	if (new_policy->governor == policy->governor)
		goto out;

	pr_debug("governor switch\n");

	/* save old, working values */
	old_gov = policy->governor;
	/* end old governor */
	if (old_gov) {
		__cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		/* POLICY_EXIT is called with policy->rwsem released. */
		up_write(&policy->rwsem);
		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
		down_write(&policy->rwsem);
	}

	/* start new governor */
	policy->governor = new_policy->governor;
	if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
		if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
			goto out;

		/* START failed: tear the freshly-initialized governor down. */
		up_write(&policy->rwsem);
		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
		down_write(&policy->rwsem);
	}

	/* new governor failed, so re-start old one */
	pr_debug("starting governor %s failed\n", policy->governor->name);
	if (old_gov) {
		policy->governor = old_gov;
		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
		__cpufreq_governor(policy, CPUFREQ_GOV_START);
	}

	return -EINVAL;

 out:
	pr_debug("governor: change or update limits\n");
	return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
}
2284
/**
 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
 * @cpu: CPU which shall be re-evaluated
 *
 * Useful for policy notifiers which have different necessities
 * at different times.
 *
 * Rebuilds the policy from the saved user settings and re-applies it via
 * cpufreq_set_policy(), resynchronizing the cached frequency with the
 * hardware first.  Returns 0 on success or a negative error code.
 */
int cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct cpufreq_policy new_policy;
	int ret;

	if (!policy)
		return -ENODEV;

	down_write(&policy->rwsem);

	pr_debug("updating policy for CPU %u\n", cpu);
	/* Start from the current policy, then restore the user settings. */
	memcpy(&new_policy, policy, sizeof(*policy));
	new_policy.min = policy->user_policy.min;
	new_policy.max = policy->user_policy.max;
	new_policy.policy = policy->user_policy.policy;
	new_policy.governor = policy->user_policy.governor;

	/*
	 * BIOS might change freq behind our back
	 * -> ask driver for current freq and notify governors about a change
	 */
	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
		new_policy.cur = cpufreq_driver->get(cpu);
		if (WARN_ON(!new_policy.cur)) {
			/* Driver reported 0 kHz - treat as an I/O error. */
			ret = -EIO;
			goto unlock;
		}

		if (!policy->cur) {
			pr_debug("Driver did not initialize current freq\n");
			policy->cur = new_policy.cur;
		} else {
			/* Hardware frequency changed behind our back. */
			if (policy->cur != new_policy.cur && has_target())
				cpufreq_out_of_sync(policy, new_policy.cur);
		}
	}

	ret = cpufreq_set_policy(policy, &new_policy);

unlock:
	up_write(&policy->rwsem);

	cpufreq_cpu_put(policy);	/* drop the reference taken above */
	return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);
2339
Paul Gortmaker27609842013-06-19 13:54:04 -04002340static int cpufreq_cpu_callback(struct notifier_block *nfb,
Ashok Rajc32b6b82005-10-30 14:59:54 -08002341 unsigned long action, void *hcpu)
2342{
2343 unsigned int cpu = (unsigned long)hcpu;
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002344 struct device *dev;
Ashok Rajc32b6b82005-10-30 14:59:54 -08002345
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002346 dev = get_cpu_device(cpu);
2347 if (dev) {
Srivatsa S. Bhat5302c3f2013-07-30 04:25:25 +05302348 switch (action & ~CPU_TASKS_FROZEN) {
Ashok Rajc32b6b82005-10-30 14:59:54 -08002349 case CPU_ONLINE:
Viresh Kumar23faf0b2015-02-19 17:02:04 +05302350 cpufreq_add_dev(dev, NULL);
Ashok Rajc32b6b82005-10-30 14:59:54 -08002351 break;
Srivatsa S. Bhat5302c3f2013-07-30 04:25:25 +05302352
Ashok Rajc32b6b82005-10-30 14:59:54 -08002353 case CPU_DOWN_PREPARE:
Viresh Kumar96bbbe42014-03-10 14:53:35 +05302354 __cpufreq_remove_dev_prepare(dev, NULL);
Srivatsa S. Bhat1aee40a2013-09-07 01:23:27 +05302355 break;
2356
2357 case CPU_POST_DEAD:
Viresh Kumar96bbbe42014-03-10 14:53:35 +05302358 __cpufreq_remove_dev_finish(dev, NULL);
Ashok Rajc32b6b82005-10-30 14:59:54 -08002359 break;
Srivatsa S. Bhat5302c3f2013-07-30 04:25:25 +05302360
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08002361 case CPU_DOWN_FAILED:
Viresh Kumar23faf0b2015-02-19 17:02:04 +05302362 cpufreq_add_dev(dev, NULL);
Ashok Rajc32b6b82005-10-30 14:59:54 -08002363 break;
2364 }
2365 }
2366 return NOTIFY_OK;
2367}
2368
/*
 * CPU hotplug notifier; registered in cpufreq_register_driver() and
 * unregistered in cpufreq_unregister_driver().
 */
static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call = cpufreq_cpu_callback,
};
Linus Torvalds1da177e2005-04-16 15:20:36 -07002372
2373/*********************************************************************
Lukasz Majewski6f19efc2013-12-20 15:24:49 +01002374 * BOOST *
2375 *********************************************************************/
2376static int cpufreq_boost_set_sw(int state)
2377{
2378 struct cpufreq_frequency_table *freq_table;
2379 struct cpufreq_policy *policy;
2380 int ret = -EINVAL;
2381
Viresh Kumarf9637352015-05-12 12:20:11 +05302382 for_each_active_policy(policy) {
Lukasz Majewski6f19efc2013-12-20 15:24:49 +01002383 freq_table = cpufreq_frequency_get_table(policy->cpu);
2384 if (freq_table) {
2385 ret = cpufreq_frequency_table_cpuinfo(policy,
2386 freq_table);
2387 if (ret) {
2388 pr_err("%s: Policy frequency update failed\n",
2389 __func__);
2390 break;
2391 }
2392 policy->user_policy.max = policy->max;
2393 __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2394 }
2395 }
2396
2397 return ret;
2398}
2399
/*
 * cpufreq_boost_trigger_state - enable or disable frequency boost.
 * @state: nonzero to enable boost, 0 to disable it.
 *
 * Updates cpufreq_driver->boost_enabled under cpufreq_driver_lock and
 * then asks the driver to apply the new state; on failure the flag is
 * reverted.  Returns 0 on success or the driver's error code.
 */
int cpufreq_boost_trigger_state(int state)
{
	unsigned long flags;
	int ret = 0;

	/* Nothing to do if the requested state is already set. */
	if (cpufreq_driver->boost_enabled == state)
		return 0;

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver->boost_enabled = state;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/* The driver callback runs without cpufreq_driver_lock held. */
	ret = cpufreq_driver->set_boost(state);
	if (ret) {
		/* Driver rejected the change - revert the flag. */
		write_lock_irqsave(&cpufreq_driver_lock, flags);
		cpufreq_driver->boost_enabled = !state;
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);

		pr_err("%s: Cannot %s BOOST\n",
		       __func__, state ? "enable" : "disable");
	}

	return ret;
}
2424
2425int cpufreq_boost_supported(void)
2426{
2427 if (likely(cpufreq_driver))
2428 return cpufreq_driver->boost_supported;
2429
2430 return 0;
2431}
2432EXPORT_SYMBOL_GPL(cpufreq_boost_supported);
2433
/*
 * cpufreq_boost_enabled - return the driver's current boost state.
 *
 * NOTE(review): unlike cpufreq_boost_supported(), this dereferences
 * cpufreq_driver unconditionally - callers presumably ensure a driver
 * is registered; confirm before calling from new paths.
 */
int cpufreq_boost_enabled(void)
{
	return cpufreq_driver->boost_enabled;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
2439
2440/*********************************************************************
Linus Torvalds1da177e2005-04-16 15:20:36 -07002441 * REGISTER / UNREGISTER CPUFREQ DRIVER *
2442 *********************************************************************/
2443
2444/**
2445 * cpufreq_register_driver - register a CPU Frequency driver
2446 * @driver_data: A struct cpufreq_driver containing the values#
2447 * submitted by the CPU Frequency driver.
2448 *
Viresh Kumarbb176f72013-06-19 14:19:33 +05302449 * Registers a CPU Frequency driver to this core code. This code
Linus Torvalds1da177e2005-04-16 15:20:36 -07002450 * returns zero on success, -EBUSY when another driver got here first
Dave Jones32ee8c32006-02-28 00:43:23 -05002451 * (and isn't unregistered in the meantime).
Linus Torvalds1da177e2005-04-16 15:20:36 -07002452 *
2453 */
Linus Torvalds221dee22007-02-26 14:55:48 -08002454int cpufreq_register_driver(struct cpufreq_driver *driver_data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002455{
2456 unsigned long flags;
2457 int ret;
2458
Konrad Rzeszutek Wilka7b422c2012-03-13 19:18:39 -04002459 if (cpufreq_disabled())
2460 return -ENODEV;
2461
Linus Torvalds1da177e2005-04-16 15:20:36 -07002462 if (!driver_data || !driver_data->verify || !driver_data->init ||
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +05302463 !(driver_data->setpolicy || driver_data->target_index ||
Rafael J. Wysocki98322352014-03-19 12:48:30 +01002464 driver_data->target) ||
2465 (driver_data->setpolicy && (driver_data->target_index ||
Viresh Kumar1c03a2d2014-06-02 22:49:28 +05302466 driver_data->target)) ||
2467 (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002468 return -EINVAL;
2469
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02002470 pr_debug("trying to register driver %s\n", driver_data->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002471
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00002472 write_lock_irqsave(&cpufreq_driver_lock, flags);
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02002473 if (cpufreq_driver) {
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00002474 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
Yinghai Lu4dea58062013-09-18 21:05:20 -07002475 return -EEXIST;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002476 }
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02002477 cpufreq_driver = driver_data;
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00002478 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002479
Viresh Kumarbc68b7d2015-01-02 12:34:30 +05302480 if (driver_data->setpolicy)
2481 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2482
Lukasz Majewski6f19efc2013-12-20 15:24:49 +01002483 if (cpufreq_boost_supported()) {
2484 /*
2485 * Check if driver provides function to enable boost -
2486 * if not, use cpufreq_boost_set_sw as default
2487 */
2488 if (!cpufreq_driver->set_boost)
2489 cpufreq_driver->set_boost = cpufreq_boost_set_sw;
2490
2491 ret = cpufreq_sysfs_create_file(&boost.attr);
2492 if (ret) {
2493 pr_err("%s: cannot register global BOOST sysfs file\n",
Joe Perchese837f9b2014-03-11 10:03:00 -07002494 __func__);
Lukasz Majewski6f19efc2013-12-20 15:24:49 +01002495 goto err_null_driver;
2496 }
2497 }
2498
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002499 ret = subsys_interface_register(&cpufreq_interface);
Jiri Slaby8f5bc2a2011-03-01 17:41:10 +01002500 if (ret)
Lukasz Majewski6f19efc2013-12-20 15:24:49 +01002501 goto err_boost_unreg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002502
Viresh Kumarce1bcfe2015-01-02 12:34:35 +05302503 if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
2504 list_empty(&cpufreq_policy_list)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002505 /* if all ->init() calls failed, unregister */
Viresh Kumarce1bcfe2015-01-02 12:34:35 +05302506 pr_debug("%s: No CPU initialized for driver %s\n", __func__,
2507 driver_data->name);
2508 goto err_if_unreg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002509 }
2510
Jiri Slaby8f5bc2a2011-03-01 17:41:10 +01002511 register_hotcpu_notifier(&cpufreq_cpu_notifier);
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02002512 pr_debug("driver %s up and running\n", driver_data->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002513
Jiri Slaby8f5bc2a2011-03-01 17:41:10 +01002514 return 0;
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002515err_if_unreg:
2516 subsys_interface_unregister(&cpufreq_interface);
Lukasz Majewski6f19efc2013-12-20 15:24:49 +01002517err_boost_unreg:
2518 if (cpufreq_boost_supported())
2519 cpufreq_sysfs_remove_file(&boost.attr);
Jiri Slaby8f5bc2a2011-03-01 17:41:10 +01002520err_null_driver:
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00002521 write_lock_irqsave(&cpufreq_driver_lock, flags);
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02002522 cpufreq_driver = NULL;
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00002523 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
Dave Jones4d34a672008-02-07 16:33:49 -05002524 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002525}
2526EXPORT_SYMBOL_GPL(cpufreq_register_driver);
2527
/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 * @driver: the driver being unregistered; must match the driver that is
 *	currently registered.
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	/* Only the currently registered driver may be unregistered. */
	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	subsys_interface_unregister(&cpufreq_interface);
	if (cpufreq_boost_supported())
		cpufreq_sysfs_remove_file(&boost.attr);

	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	/*
	 * Clear the driver pointer with both cpufreq_rwsem and
	 * cpufreq_driver_lock held, in that order.
	 */
	down_write(&cpufreq_rwsem);
	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpufreq_driver = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	up_write(&cpufreq_rwsem);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08002562
/*
 * Stop cpufreq at shutdown to make sure it isn't holding any locks
 * or mutexes when secondary CPUs are halted.
 * Registered from cpufreq_core_init().
 */
static struct syscore_ops cpufreq_syscore_ops = {
	.shutdown = cpufreq_suspend,
};
2570
/*
 * cpufreq_core_init - early cpufreq core setup, run at core_initcall time.
 *
 * Creates the global cpufreq kobject and registers the shutdown syscore
 * hook.  Returns 0 on success or -ENODEV when cpufreq is disabled.
 */
static int __init cpufreq_core_init(void)
{
	if (cpufreq_disabled())
		return -ENODEV;

	/* Failure to allocate the global kobject is fatal. */
	cpufreq_global_kobject = kobject_create();
	BUG_ON(!cpufreq_global_kobject);

	register_syscore_ops(&cpufreq_syscore_ops);

	return 0;
}
core_initcall(cpufreq_core_init);