Skip to content

Commit 67d874c

Browse files
vireshk authored and rafaeljw committed
cpufreq: Register notifiers with the PM QoS framework
Register notifiers for min/max frequency constraints with the PM QoS framework. The constraints are also taken into consideration in cpufreq_set_policy(). This also relocates cpufreq_policy_put_kobj() as it is required to be called from cpufreq_policy_alloc() now. refresh_frequency_limits() is updated to avoid calling cpufreq_set_policy() for inactive policies and handle_update() is updated to have proper locking in place. No constraints are added until now though. Reviewed-by: Matthias Kaehlcke <mka@chromium.org> Reviewed-by: Ulf Hansson <ulf.hansson@linaro.org> Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org> Tested-by: Pavel Machek <pavel@ucw.cz> Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
1 parent 208637b commit 67d874c

2 files changed

Lines changed: 108 additions & 30 deletions

File tree

drivers/cpufreq/cpufreq.c

Lines changed: 105 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,7 @@
2626
#include <linux/kernel_stat.h>
2727
#include <linux/module.h>
2828
#include <linux/mutex.h>
29+
#include <linux/pm_qos.h>
2930
#include <linux/slab.h>
3031
#include <linux/suspend.h>
3132
#include <linux/syscore_ops.h>
@@ -999,7 +1000,7 @@ static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu)
9991000
{
10001001
struct device *dev = get_cpu_device(cpu);
10011002

1002-
if (!dev)
1003+
if (unlikely(!dev))
10031004
return;
10041005

10051006
if (cpumask_test_and_set_cpu(cpu, policy->real_cpus))
@@ -1117,14 +1118,16 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cp
11171118

11181119
static void refresh_frequency_limits(struct cpufreq_policy *policy)
11191120
{
1120-
struct cpufreq_policy new_policy = *policy;
1121-
1122-
pr_debug("updating policy for CPU %u\n", policy->cpu);
1121+
struct cpufreq_policy new_policy;
11231122

1124-
new_policy.min = policy->user_policy.min;
1125-
new_policy.max = policy->user_policy.max;
1123+
if (!policy_is_inactive(policy)) {
1124+
new_policy = *policy;
1125+
pr_debug("updating policy for CPU %u\n", policy->cpu);
11261126

1127-
cpufreq_set_policy(policy, &new_policy);
1127+
new_policy.min = policy->user_policy.min;
1128+
new_policy.max = policy->user_policy.max;
1129+
cpufreq_set_policy(policy, &new_policy);
1130+
}
11281131
}
11291132

11301133
static void handle_update(struct work_struct *work)
@@ -1133,14 +1136,60 @@ static void handle_update(struct work_struct *work)
11331136
container_of(work, struct cpufreq_policy, update);
11341137

11351138
pr_debug("handle_update for cpu %u called\n", policy->cpu);
1139+
down_write(&policy->rwsem);
11361140
refresh_frequency_limits(policy);
1141+
up_write(&policy->rwsem);
1142+
}
1143+
1144+
static int cpufreq_notifier_min(struct notifier_block *nb, unsigned long freq,
1145+
void *data)
1146+
{
1147+
struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_min);
1148+
1149+
schedule_work(&policy->update);
1150+
return 0;
1151+
}
1152+
1153+
static int cpufreq_notifier_max(struct notifier_block *nb, unsigned long freq,
1154+
void *data)
1155+
{
1156+
struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_max);
1157+
1158+
schedule_work(&policy->update);
1159+
return 0;
1160+
}
1161+
1162+
static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
1163+
{
1164+
struct kobject *kobj;
1165+
struct completion *cmp;
1166+
1167+
down_write(&policy->rwsem);
1168+
cpufreq_stats_free_table(policy);
1169+
kobj = &policy->kobj;
1170+
cmp = &policy->kobj_unregister;
1171+
up_write(&policy->rwsem);
1172+
kobject_put(kobj);
1173+
1174+
/*
1175+
* We need to make sure that the underlying kobj is
1176+
* actually not referenced anymore by anybody before we
1177+
* proceed with unloading.
1178+
*/
1179+
pr_debug("waiting for dropping of refcount\n");
1180+
wait_for_completion(cmp);
1181+
pr_debug("wait complete\n");
11371182
}
11381183

11391184
static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
11401185
{
11411186
struct cpufreq_policy *policy;
1187+
struct device *dev = get_cpu_device(cpu);
11421188
int ret;
11431189

1190+
if (!dev)
1191+
return NULL;
1192+
11441193
policy = kzalloc(sizeof(*policy), GFP_KERNEL);
11451194
if (!policy)
11461195
return NULL;
@@ -1157,7 +1206,7 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
11571206
ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
11581207
cpufreq_global_kobject, "policy%u", cpu);
11591208
if (ret) {
1160-
pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
1209+
dev_err(dev, "%s: failed to init policy->kobj: %d\n", __func__, ret);
11611210
/*
11621211
* The entire policy object will be freed below, but the extra
11631212
* memory allocated for the kobject name needs to be freed by
@@ -1167,6 +1216,25 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
11671216
goto err_free_real_cpus;
11681217
}
11691218

1219+
policy->nb_min.notifier_call = cpufreq_notifier_min;
1220+
policy->nb_max.notifier_call = cpufreq_notifier_max;
1221+
1222+
ret = dev_pm_qos_add_notifier(dev, &policy->nb_min,
1223+
DEV_PM_QOS_MIN_FREQUENCY);
1224+
if (ret) {
1225+
dev_err(dev, "Failed to register MIN QoS notifier: %d (%*pbl)\n",
1226+
ret, cpumask_pr_args(policy->cpus));
1227+
goto err_kobj_remove;
1228+
}
1229+
1230+
ret = dev_pm_qos_add_notifier(dev, &policy->nb_max,
1231+
DEV_PM_QOS_MAX_FREQUENCY);
1232+
if (ret) {
1233+
dev_err(dev, "Failed to register MAX QoS notifier: %d (%*pbl)\n",
1234+
ret, cpumask_pr_args(policy->cpus));
1235+
goto err_min_qos_notifier;
1236+
}
1237+
11701238
INIT_LIST_HEAD(&policy->policy_list);
11711239
init_rwsem(&policy->rwsem);
11721240
spin_lock_init(&policy->transition_lock);
@@ -1177,6 +1245,11 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
11771245
policy->cpu = cpu;
11781246
return policy;
11791247

1248+
err_min_qos_notifier:
1249+
dev_pm_qos_remove_notifier(dev, &policy->nb_min,
1250+
DEV_PM_QOS_MIN_FREQUENCY);
1251+
err_kobj_remove:
1252+
cpufreq_policy_put_kobj(policy);
11801253
err_free_real_cpus:
11811254
free_cpumask_var(policy->real_cpus);
11821255
err_free_rcpumask:
@@ -1189,30 +1262,9 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
11891262
return NULL;
11901263
}
11911264

1192-
static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
1193-
{
1194-
struct kobject *kobj;
1195-
struct completion *cmp;
1196-
1197-
down_write(&policy->rwsem);
1198-
cpufreq_stats_free_table(policy);
1199-
kobj = &policy->kobj;
1200-
cmp = &policy->kobj_unregister;
1201-
up_write(&policy->rwsem);
1202-
kobject_put(kobj);
1203-
1204-
/*
1205-
* We need to make sure that the underlying kobj is
1206-
* actually not referenced anymore by anybody before we
1207-
* proceed with unloading.
1208-
*/
1209-
pr_debug("waiting for dropping of refcount\n");
1210-
wait_for_completion(cmp);
1211-
pr_debug("wait complete\n");
1212-
}
1213-
12141265
static void cpufreq_policy_free(struct cpufreq_policy *policy)
12151266
{
1267+
struct device *dev = get_cpu_device(policy->cpu);
12161268
unsigned long flags;
12171269
int cpu;
12181270

@@ -1224,6 +1276,11 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
12241276
per_cpu(cpufreq_cpu_data, cpu) = NULL;
12251277
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
12261278

1279+
dev_pm_qos_remove_notifier(dev, &policy->nb_max,
1280+
DEV_PM_QOS_MAX_FREQUENCY);
1281+
dev_pm_qos_remove_notifier(dev, &policy->nb_min,
1282+
DEV_PM_QOS_MIN_FREQUENCY);
1283+
12271284
cpufreq_policy_put_kobj(policy);
12281285
free_cpumask_var(policy->real_cpus);
12291286
free_cpumask_var(policy->related_cpus);
@@ -2283,6 +2340,8 @@ int cpufreq_set_policy(struct cpufreq_policy *policy,
22832340
struct cpufreq_policy *new_policy)
22842341
{
22852342
struct cpufreq_governor *old_gov;
2343+
struct device *cpu_dev = get_cpu_device(policy->cpu);
2344+
unsigned long min, max;
22862345
int ret;
22872346

22882347
pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
@@ -2297,11 +2356,27 @@ int cpufreq_set_policy(struct cpufreq_policy *policy,
22972356
if (new_policy->min > new_policy->max)
22982357
return -EINVAL;
22992358

2359+
/*
2360+
* PM QoS framework collects all the requests from users and provide us
2361+
* the final aggregated value here.
2362+
*/
2363+
min = dev_pm_qos_read_value(cpu_dev, DEV_PM_QOS_MIN_FREQUENCY);
2364+
max = dev_pm_qos_read_value(cpu_dev, DEV_PM_QOS_MAX_FREQUENCY);
2365+
2366+
if (min > new_policy->min)
2367+
new_policy->min = min;
2368+
if (max < new_policy->max)
2369+
new_policy->max = max;
2370+
23002371
/* verify the cpu speed can be set within this limit */
23012372
ret = cpufreq_driver->verify(new_policy);
23022373
if (ret)
23032374
return ret;
23042375

2376+
/*
2377+
* The notifier-chain shall be removed once all the users of
2378+
* CPUFREQ_ADJUST are moved to use the QoS framework.
2379+
*/
23052380
/* adjust if necessary - all reasons */
23062381
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
23072382
CPUFREQ_ADJUST, new_policy);

include/linux/cpufreq.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -147,6 +147,9 @@ struct cpufreq_policy {
147147

148148
/* Pointer to the cooling device if used for thermal mitigation */
149149
struct thermal_cooling_device *cdev;
150+
151+
struct notifier_block nb_min;
152+
struct notifier_block nb_max;
150153
};
151154

152155
struct cpufreq_freqs {

0 commit comments

Comments (0)