@@ -772,7 +772,7 @@ void post_init_entity_util_avg(struct sched_entity *se)
 	 * For !fair tasks do:
 	 *
 	update_cfs_rq_load_avg(now, cfs_rq);
-	attach_entity_load_avg(cfs_rq, se);
+	attach_entity_load_avg(cfs_rq, se, 0);
 	switched_from_fair(rq, p);
 	 *
 	 * such that the next switched_to_fair() has the
@@ -3009,11 +3009,11 @@ static inline void update_cfs_group(struct sched_entity *se)
 }
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
-static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
+static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
 {
 	struct rq *rq = rq_of(cfs_rq);
 
-	if (&rq->cfs == cfs_rq) {
+	if (&rq->cfs == cfs_rq || (flags & SCHED_CPUFREQ_MIGRATION)) {
 		/*
 		 * There are a few boundary cases this might miss but it should
 		 * get called often enough that that should (hopefully) not be
@@ -3028,7 +3028,7 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
 		 *
 		 * See cpu_util().
 		 */
-		cpufreq_update_util(rq, 0);
+		cpufreq_update_util(rq, flags);
 	}
 }
 
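Taken together, these two hunks widen the condition under which the cpufreq governor is poked: before the patch only changes to the root cfs_rq (&rq->cfs) triggered an update, while afterwards a SCHED_CPUFREQ_MIGRATION hint forces one even for a group cfs_rq, and the hint itself is forwarded to cpufreq_update_util(). The following standalone sketch models just that gate; the struct types are mocked here and the flag value is an assumption (the real definitions live in kernel/sched/sched.h and include/linux/sched/cpufreq.h):

#include <stdio.h>

/* Assumed value; the real definition lives in include/linux/sched/cpufreq.h. */
#define SCHED_CPUFREQ_MIGRATION	(1U << 1)

/* Mocked stand-ins for the kernel's struct rq / struct cfs_rq. */
struct cfs_rq { int dummy; };
struct rq { struct cfs_rq cfs; };

/* Returns 1 when cpufreq_update_util() would be called, 0 otherwise. */
static int would_update_cpufreq(struct rq *rq, struct cfs_rq *cfs_rq, int flags)
{
	/*
	 * Pre-patch, only the root cfs_rq (&rq->cfs) could trigger an
	 * update; post-patch, a migration hint forces one for any cfs_rq.
	 */
	return &rq->cfs == cfs_rq || (flags & SCHED_CPUFREQ_MIGRATION);
}

int main(void)
{
	struct rq rq;
	struct cfs_rq group_cfs_rq;	/* a non-root (task group) cfs_rq */

	printf("root, no hint:   %d\n", would_update_cpufreq(&rq, &rq.cfs, 0));
	printf("group, no hint:  %d\n", would_update_cpufreq(&rq, &group_cfs_rq, 0));
	printf("group, migrated: %d\n",
	       would_update_cpufreq(&rq, &group_cfs_rq, SCHED_CPUFREQ_MIGRATION));
	return 0;
}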
@@ -3686,7 +3686,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 #endif
 
 	if (decayed)
-		cfs_rq_util_change(cfs_rq);
+		cfs_rq_util_change(cfs_rq, 0);
 
 	return decayed;
 }
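Note that update_cfs_rq_load_avg() deliberately passes 0 here: a periodic decay of the averages says nothing about task placement, so no migration hint is attached and only the root-cfs_rq check in cfs_rq_util_change() can fire, exactly as before the patch.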
@@ -3699,7 +3699,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
  * Must call update_cfs_rq_load_avg() before this, since we rely on
  * cfs_rq->avg.last_update_time being current.
  */
-static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
 	u32 divider = LOAD_AVG_MAX - 1024 + cfs_rq->avg.period_contrib;
 
@@ -3735,7 +3735,7 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 
 	add_tg_cfs_propagate(cfs_rq, se->avg.load_sum);
 
-	cfs_rq_util_change(cfs_rq);
+	cfs_rq_util_change(cfs_rq, flags);
 }
 
 /**
@@ -3754,7 +3754,7 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 
 	add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
 
-	cfs_rq_util_change(cfs_rq);
+	cfs_rq_util_change(cfs_rq, 0);
 }
 
 /*
@@ -3784,7 +3784,14 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 
 	if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
 
-		attach_entity_load_avg(cfs_rq, se);
+		/*
+		 * DO_ATTACH means we're here from enqueue_entity().
+		 * !last_update_time means we've passed through
+		 * migrate_task_rq_fair() indicating we migrated.
+		 *
+		 * IOW we're enqueueing a task on a new CPU.
+		 */
+		attach_entity_load_avg(cfs_rq, se, SCHED_CPUFREQ_MIGRATION);
 		update_tg_load_avg(cfs_rq, 0);
 
 	} else if (decayed && (flags & UPDATE_TG))
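This hunk is the core of the change: a freshly migrated task is recognized at enqueue time by the combination of DO_ATTACH and a zeroed last_update_time (cleared by migrate_task_rq_fair() on the old CPU), and the attach then carries the migration hint all the way down to the governor. A minimal userspace sketch of that branch follows; the flag values are assumptions (DO_ATTACH is a fair.c-internal flag, SCHED_CPUFREQ_MIGRATION is declared in include/linux/sched/cpufreq.h), and the mock functions only model the control flow, not the PELT arithmetic:

#include <stdio.h>

/* Assumed values for illustration only. */
#define DO_ATTACH		0x4
#define SCHED_CPUFREQ_MIGRATION	(1U << 1)

struct sched_entity {
	unsigned long long last_update_time;	/* 0 == detached after a migration */
};

/* Mock: just report which cpufreq hint the attach would carry. */
static void attach_entity_load_avg_mock(struct sched_entity *se, int flags)
{
	(void)se;
	printf("attach, cpufreq flags = 0x%x\n", flags);
}

static void update_load_avg_mock(struct sched_entity *se, int flags)
{
	/*
	 * Mirrors the patched branch: last_update_time == 0 means
	 * migrate_task_rq_fair() detached us on the old CPU, and
	 * DO_ATTACH means we're being enqueued, i.e. this is the
	 * first enqueue on the new CPU.
	 */
	if (!se->last_update_time && (flags & DO_ATTACH))
		attach_entity_load_avg_mock(se, SCHED_CPUFREQ_MIGRATION);
}

int main(void)
{
	struct sched_entity se = { .last_update_time = 0 };

	update_load_avg_mock(&se, DO_ATTACH);	/* prints the migration hint */
	return 0;
}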
@@ -3880,13 +3887,13 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 
 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1)
 {
-	cfs_rq_util_change(cfs_rq);
+	cfs_rq_util_change(cfs_rq, 0);
 }
 
 static inline void remove_entity_load_avg(struct sched_entity *se) {}
 
 static inline void
-attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
+attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) {}
 static inline void
 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
 
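The !CONFIG_SMP stubs grow the same extra parameter so that call sites compile identically on UP builds; the attach stub's body stays empty because without SMP there is no per-entity load tracking to attach, and by the same token no cross-CPU migration to hint about.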
@@ -9726,7 +9733,7 @@ static void attach_entity_cfs_rq(struct sched_entity *se)
 
 	/* Synchronize entity with its cfs_rq */
 	update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
-	attach_entity_load_avg(cfs_rq, se);
+	attach_entity_load_avg(cfs_rq, se, 0);
 	update_tg_load_avg(cfs_rq, false);
 	propagate_entity_cfs_rq(se);
 }
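This last call site keeps a 0 for the new argument: attach_entity_cfs_rq() runs on paths such as switching a task to the fair class or moving it between task groups, where the task does not change CPU, so a migration hint here would be misleading.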