Skip to content

Commit a61dec7

Browse files
committed
cpufreq: schedutil: Avoid missing updates for one-CPU policies
Commit 152db03 (schedutil: Allow cpufreq requests to be made even when kthread kicked) made changes to prevent utilization updates from being discarded while a previous request is being processed, but it left a small window in which that still can happen in the one-CPU policy case. Namely, updates coming in after setting work_in_progress in sugov_update_commit() and clearing it in sugov_work() will still be dropped due to the work_in_progress check in sugov_update_single(). To close that window, rearrange the code so as to acquire the update lock around the deferred update branch in sugov_update_single() and drop the work_in_progress check from it. Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com> Reviewed-by: Juri Lelli <juri.lelli@redhat.com> Acked-by: Viresh Kumar <viresh.kumar@linaro.org> Reviewed-by: Joel Fernandes (Google) <joel@joelfernandes.org>
1 parent 152db03 commit a61dec7

File tree

1 file changed

+47
-23
lines changed

1 file changed

+47
-23
lines changed

kernel/sched/cpufreq_schedutil.c

Lines changed: 47 additions & 23 deletions
Original file line number | Diff line number | Diff line change
@@ -100,25 +100,41 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
100100
return delta_ns >= sg_policy->freq_update_delay_ns;
101101
}
102102

103-
static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
104-
unsigned int next_freq)
103+
static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
104+
unsigned int next_freq)
105105
{
106-
struct cpufreq_policy *policy = sg_policy->policy;
107-
108106
if (sg_policy->next_freq == next_freq)
109-
return;
107+
return false;
110108

111109
sg_policy->next_freq = next_freq;
112110
sg_policy->last_freq_update_time = time;
113111

114-
if (policy->fast_switch_enabled) {
115-
next_freq = cpufreq_driver_fast_switch(policy, next_freq);
116-
if (!next_freq)
117-
return;
112+
return true;
113+
}
114+
115+
static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
116+
unsigned int next_freq)
117+
{
118+
struct cpufreq_policy *policy = sg_policy->policy;
119+
120+
if (!sugov_update_next_freq(sg_policy, time, next_freq))
121+
return;
122+
123+
next_freq = cpufreq_driver_fast_switch(policy, next_freq);
124+
if (!next_freq)
125+
return;
118126

119-
policy->cur = next_freq;
120-
trace_cpu_frequency(next_freq, smp_processor_id());
121-
} else if (!sg_policy->work_in_progress) {
127+
policy->cur = next_freq;
128+
trace_cpu_frequency(next_freq, smp_processor_id());
129+
}
130+
131+
static void sugov_deferred_update(struct sugov_policy *sg_policy, u64 time,
132+
unsigned int next_freq)
133+
{
134+
if (!sugov_update_next_freq(sg_policy, time, next_freq))
135+
return;
136+
137+
if (!sg_policy->work_in_progress) {
122138
sg_policy->work_in_progress = true;
123139
irq_work_queue(&sg_policy->irq_work);
124140
}
@@ -363,13 +379,6 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
363379

364380
ignore_dl_rate_limit(sg_cpu, sg_policy);
365381

366-
/*
367-
* For slow-switch systems, single policy requests can't run at the
368-
* moment if update is in progress, unless we acquire update_lock.
369-
*/
370-
if (sg_policy->work_in_progress)
371-
return;
372-
373382
if (!sugov_should_update_freq(sg_policy, time))
374383
return;
375384

@@ -391,7 +400,18 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
391400
sg_policy->cached_raw_freq = 0;
392401
}
393402

394-
sugov_update_commit(sg_policy, time, next_f);
403+
/*
404+
* This code runs under rq->lock for the target CPU, so it won't run
405+
* concurrently on two different CPUs for the same target and it is not
406+
* necessary to acquire the lock in the fast switch case.
407+
*/
408+
if (sg_policy->policy->fast_switch_enabled) {
409+
sugov_fast_switch(sg_policy, time, next_f);
410+
} else {
411+
raw_spin_lock(&sg_policy->update_lock);
412+
sugov_deferred_update(sg_policy, time, next_f);
413+
raw_spin_unlock(&sg_policy->update_lock);
414+
}
395415
}
396416

397417
static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
@@ -435,7 +455,11 @@ sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
435455

436456
if (sugov_should_update_freq(sg_policy, time)) {
437457
next_f = sugov_next_freq_shared(sg_cpu, time);
438-
sugov_update_commit(sg_policy, time, next_f);
458+
459+
if (sg_policy->policy->fast_switch_enabled)
460+
sugov_fast_switch(sg_policy, time, next_f);
461+
else
462+
sugov_deferred_update(sg_policy, time, next_f);
439463
}
440464

441465
raw_spin_unlock(&sg_policy->update_lock);
@@ -450,11 +474,11 @@ static void sugov_work(struct kthread_work *work)
450474
/*
451475
* Hold sg_policy->update_lock shortly to handle the case where:
452476
* in case sg_policy->next_freq is read here, and then updated by
453-
* sugov_update_shared just before work_in_progress is set to false
477+
* sugov_deferred_update() just before work_in_progress is set to false
454478
* here, we may miss queueing the new update.
455479
*
456480
* Note: If a work was queued after the update_lock is released,
457-
* sugov_work will just be called again by kthread_work code; and the
481+
* sugov_work() will just be called again by kthread_work code; and the
458482
* request will be processed before the sugov thread sleeps.
459483
*/
460484
raw_spin_lock_irqsave(&sg_policy->update_lock, flags);

0 commit comments

Comments
 (0)