@@ -782,6 +782,21 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
 	cfs_rq->min_vruntime = __update_min_vruntime(cfs_rq, vruntime);
 }
 
+static inline u64 cfs_rq_min_slice(struct cfs_rq *cfs_rq)
+{
+	struct sched_entity *root = __pick_root_entity(cfs_rq);
+	struct sched_entity *curr = cfs_rq->curr;
+	u64 min_slice = ~0ULL;
+
+	if (curr && curr->on_rq)
+		min_slice = curr->slice;
+
+	if (root)
+		min_slice = min(min_slice, root->min_slice);
+
+	return min_slice;
+}
+
 static inline bool __entity_less(struct rb_node *a, const struct rb_node *b)
 {
 	return entity_before(__node_2_se(a), __node_2_se(b));
@@ -798,19 +813,34 @@ static inline void __min_vruntime_update(struct sched_entity *se, struct rb_node
 	}
 }
 
+static inline void __min_slice_update(struct sched_entity *se, struct rb_node *node)
+{
+	if (node) {
+		struct sched_entity *rse = __node_2_se(node);
+		if (rse->min_slice < se->min_slice)
+			se->min_slice = rse->min_slice;
+	}
+}
+
 /*
  * se->min_vruntime = min(se->vruntime, {left,right}->min_vruntime)
  */
 static inline bool min_vruntime_update(struct sched_entity *se, bool exit)
 {
 	u64 old_min_vruntime = se->min_vruntime;
+	u64 old_min_slice = se->min_slice;
 	struct rb_node *node = &se->run_node;
 
 	se->min_vruntime = se->vruntime;
 	__min_vruntime_update(se, node->rb_right);
 	__min_vruntime_update(se, node->rb_left);
 
-	return se->min_vruntime == old_min_vruntime;
+	se->min_slice = se->slice;
+	__min_slice_update(se, node->rb_right);
+	__min_slice_update(se, node->rb_left);
+
+	return se->min_vruntime == old_min_vruntime &&
+	       se->min_slice == old_min_slice;
 }
 
 RB_DECLARE_CALLBACKS(static, min_vruntime_cb, struct sched_entity,
@@ -823,6 +853,7 @@ static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	avg_vruntime_add(cfs_rq, se);
 	se->min_vruntime = se->vruntime;
+	se->min_slice = se->slice;
 	rb_add_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
 				__entity_less, &min_vruntime_cb);
 }
@@ -6911,6 +6942,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	int idle_h_nr_running = task_has_idle_policy(p);
 	int task_new = !(flags & ENQUEUE_WAKEUP);
 	int rq_h_nr_running = rq->cfs.h_nr_running;
+	u64 slice = 0;
 
 	if (flags & ENQUEUE_DELAYED) {
 		requeue_delayed_entity(se);
@@ -6940,7 +6972,18 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 			break;
 		}
 		cfs_rq = cfs_rq_of(se);
+
+		/*
+		 * Basically set the slice of group entries to the min_slice of
+		 * their respective cfs_rq. This ensures the group can service
+		 * its entities in the desired time-frame.
+		 */
+		if (slice) {
+			se->slice = slice;
+			se->custom_slice = 1;
+		}
 		enqueue_entity(cfs_rq, se, flags);
+		slice = cfs_rq_min_slice(cfs_rq);
 
 		cfs_rq->h_nr_running++;
 		cfs_rq->idle_h_nr_running += idle_h_nr_running;
@@ -6962,6 +7005,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		se_update_runnable(se);
 		update_cfs_group(se);
 
+		se->slice = slice;
+		slice = cfs_rq_min_slice(cfs_rq);
+
 		cfs_rq->h_nr_running++;
 		cfs_rq->idle_h_nr_running += idle_h_nr_running;
 
@@ -7027,11 +7073,15 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 	int idle_h_nr_running = 0;
 	int h_nr_running = 0;
 	struct cfs_rq *cfs_rq;
+	u64 slice = 0;
 
 	if (entity_is_task(se)) {
 		p = task_of(se);
 		h_nr_running = 1;
 		idle_h_nr_running = task_has_idle_policy(p);
+	} else {
+		cfs_rq = group_cfs_rq(se);
+		slice = cfs_rq_min_slice(cfs_rq);
 	}
 
 	for_each_sched_entity(se) {
@@ -7056,6 +7106,8 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 
 		/* Don't dequeue parent if it has other entities besides us */
 		if (cfs_rq->load.weight) {
+			slice = cfs_rq_min_slice(cfs_rq);
+
 			/* Avoid re-evaluating load for this entity: */
 			se = parent_entity(se);
 			/*
@@ -7077,6 +7129,9 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 		se_update_runnable(se);
 		update_cfs_group(se);
 
+		se->slice = slice;
+		slice = cfs_rq_min_slice(cfs_rq);
+
 		cfs_rq->h_nr_running -= h_nr_running;
 		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
 
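
For readers following the diff, here is a minimal user-space sketch of the idea the patch implements: a runqueue tracks the smallest slice requested by any entity queued on it (the kernel keeps this as min_slice in the augmented rbtree, as maintained by __min_slice_update() above; the sketch simply scans an array), and when a group's entity is enqueued into its parent runqueue it adopts that minimum, so the parent level schedules the group often enough to serve its shortest-slice member. The struct and function names below (entity, queue, queue_min_slice) are illustrative assumptions, not kernel APIs.

/*
 * Minimal sketch of the min_slice propagation idea, in plain C.
 * Not kernel code: a flat array stands in for the cfs_rq rbtree.
 */
#include <stdio.h>
#include <stdint.h>

struct entity {
	uint64_t slice;			/* requested slice, in ns */
};

struct queue {
	struct entity *members[8];	/* queued entities */
	int nr;
};

/* analogous to cfs_rq_min_slice(): smallest slice among queued members */
static uint64_t queue_min_slice(const struct queue *q)
{
	uint64_t min_slice = ~0ULL;

	for (int i = 0; i < q->nr; i++)
		if (q->members[i]->slice < min_slice)
			min_slice = q->members[i]->slice;

	return min_slice;
}

int main(void)
{
	struct entity a = { .slice = 3000000 };	/* 3 ms member */
	struct entity b = { .slice =  700000 };	/* 0.7 ms member */
	struct queue group_rq = { .members = { &a, &b }, .nr = 2 };

	/* the group's own entity, enqueued on the parent runqueue */
	struct entity group_se = { .slice = 4000000 };

	/*
	 * As in enqueue_task_fair() above: walking up the hierarchy, the
	 * group entity's slice is set to the min_slice of its child queue,
	 * so the parent grants it the granularity its members need.
	 */
	group_se.slice = queue_min_slice(&group_rq);

	printf("group entity slice: %llu ns\n",
	       (unsigned long long)group_se.slice);	/* prints 700000 */
	return 0;
}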