diff -ru a/include/linux/sched.h linux-2.6.24.4/include/linux/sched.h
--- a/include/linux/sched.h	2008-03-24 14:49:18.000000000 -0400
+++ linux-2.6.24.4/include/linux/sched.h	2008-04-07 00:14:34.000000000 -0400
@@ -847,6 +847,10 @@
 	void (*set_curr_task) (struct rq *rq);
 	void (*task_tick) (struct rq *rq, struct task_struct *p);
 	void (*task_new) (struct rq *rq, struct task_struct *p);
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	void (*moved_group) (struct task_struct *p);
+#endif
 };
 
 struct load_weight {
diff -ru a/kernel/sched.c linux-2.6.24.4/kernel/sched.c
--- a/kernel/sched.c	2008-03-24 14:49:18.000000000 -0400
+++ linux-2.6.24.4/kernel/sched.c	2008-04-07 00:13:11.000000000 -0400
@@ -7115,6 +7115,11 @@
 
 	set_task_cfs_rq(tsk, task_cpu(tsk));
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	if (tsk->sched_class->moved_group)
+		tsk->sched_class->moved_group(tsk);
+#endif
+
 	if (unlikely(running))
 		tsk->sched_class->set_curr_task(rq);
 	if (on_rq)
diff -ru a/kernel/sched_fair.c linux-2.6.24.4/kernel/sched_fair.c
--- a/kernel/sched_fair.c	2008-03-24 14:49:18.000000000 -0400
+++ linux-2.6.24.4/kernel/sched_fair.c	2008-04-07 00:16:41.000000000 -0400
@@ -1104,6 +1104,16 @@
 	set_next_entity(cfs_rq_of(se), se);
 }
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+static void moved_group_fair(struct task_struct *p)
+{
+	struct cfs_rq *cfs_rq = task_cfs_rq(p);
+
+	update_curr(cfs_rq);
+	place_entity(cfs_rq, &p->se, 1);
+}
+#endif
+
 /*
  * All the scheduling class methods:
  */
@@ -1126,6 +1136,10 @@
 	.set_curr_task		= set_curr_task_fair,
 	.task_tick		= task_tick_fair,
 	.task_new		= task_new_fair,
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	.moved_group		= moved_group_fair,
+#endif
 };
 
 #ifdef CONFIG_SCHED_DEBUG
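The patch adds an optional per-class hook, moved_group(), to struct sched_class and calls it from sched_move_task() after the task has been switched to its new group's cfs_rq. Only the fair class implements it (moved_group_fair(), which runs update_curr() and place_entity() so the task gets a vruntime relative to its new group), so the caller NULL-checks the pointer before invoking it; classes that leave the pointer unset, such as rt or idle, are skipped. Below is a minimal userspace sketch of that optional-hook dispatch pattern, not kernel code; every name in it (demo_sched_class, demo_task, move_task, ...) is made up for illustration.

	/*
	 * Illustrative sketch of the pattern used by the patch: an ops table
	 * with an optional callback that the caller checks for NULL before
	 * invoking, mirroring
	 *	if (tsk->sched_class->moved_group)
	 *		tsk->sched_class->moved_group(tsk);
	 */
	#include <stdio.h>

	struct demo_task;

	struct demo_sched_class {
		void (*enqueue)(struct demo_task *t);
		void (*moved_group)(struct demo_task *t);	/* optional, may be NULL */
	};

	struct demo_task {
		const char *name;
		const struct demo_sched_class *sched_class;
	};

	static void demo_enqueue(struct demo_task *t)
	{
		printf("%s enqueued\n", t->name);
	}

	static void demo_moved_group(struct demo_task *t)
	{
		printf("%s re-placed in its new group\n", t->name);
	}

	/* class that provides the optional hook */
	static const struct demo_sched_class demo_fair_class = {
		.enqueue	= demo_enqueue,
		.moved_group	= demo_moved_group,
	};

	/* class that does not; the hook pointer stays NULL */
	static const struct demo_sched_class demo_rt_class = {
		.enqueue	= demo_enqueue,
	};

	static void move_task(struct demo_task *t)
	{
		if (t->sched_class->moved_group)	/* skip classes without the hook */
			t->sched_class->moved_group(t);
		t->sched_class->enqueue(t);
	}

	int main(void)
	{
		struct demo_task a = { "fair-task", &demo_fair_class };
		struct demo_task b = { "rt-task", &demo_rt_class };

		move_task(&a);	/* hook runs */
		move_task(&b);	/* hook skipped, no NULL dereference */
		return 0;
	}

Guarding the call at the single call site keeps the hook opt-in: existing scheduling classes need no changes, and the #ifdef CONFIG_FAIR_GROUP_SCHED wrapping keeps the extra pointer out of sched_class entirely when group scheduling is not configured.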