52b15384864bc0cf180194c10e68f114873019cd
sched/deadline: Add parentheses around comparisons in compound conditions

Wrap each relational comparison that appears as an operand of && or ||
in kernel/sched/deadline.c in its own parentheses, so the grouping of
every sub-expression is explicit. Operator precedence already evaluated
these expressions the same way, so there is no functional change.
---
 kernel/sched/deadline.c | 46 +++++++++++++++++++++++-----------------------
 1 file changed, 23 insertions(+), 23 deletions(-)

diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 686ec8a..63c9ad7 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -47,7 +47,7 @@ static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
 {
 	struct sched_dl_entity *dl_se = &p->dl;
 
-	return dl_rq->rb_leftmost == &dl_se->rb_node;
+	return (dl_rq->rb_leftmost == &dl_se->rb_node);
 }
 
 void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
@@ -119,7 +119,7 @@ static inline void dl_clear_overload(struct rq *rq)
 
 static void update_dl_migration(struct dl_rq *dl_rq)
 {
-	if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
+	if (dl_rq->dl_nr_migratory && (dl_rq->dl_nr_running > 1)) {
 		if (!dl_rq->overloaded) {
 			dl_set_overload(rq_of_dl_rq(dl_rq));
 			dl_rq->overloaded = 1;
@@ -406,7 +406,7 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se,
 		dl_se->runtime = pi_se->dl_runtime;
 	}
 
-	if (dl_se->dl_yielded && dl_se->runtime > 0)
+	if (dl_se->dl_yielded && (dl_se->runtime > 0))
 		dl_se->runtime = 0;
 
 	/*
@@ -792,7 +792,7 @@ static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
 {
 	struct rq *rq = rq_of_dl_rq(dl_rq);
 
-	if (dl_rq->earliest_dl.curr == 0 ||
+	if ((dl_rq->earliest_dl.curr == 0) ||
 	    dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
 		dl_rq->earliest_dl.curr = deadline;
 		cpudl_set(&rq->rd->cpudl, rq->cpu, deadline, 1);
@@ -951,7 +951,7 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 		 * it, as it's going to return back to its original
 		 * scheduling class after this.
 		 */
-		BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
+		BUG_ON(!p->dl.dl_boosted || (flags != ENQUEUE_REPLENISH));
 		return;
 	}
 
@@ -966,7 +966,7 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 
 	enqueue_dl_entity(&p->dl, pi_se, flags);
 
-	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
+	if (!task_current(rq, p) && (p->nr_cpus_allowed > 1))
 		enqueue_pushable_dl_task(rq, p);
 }
 
@@ -1040,12 +1040,12 @@ select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
 	 * try to make it stay here, it might be important.
 	 */
 	if (unlikely(dl_task(curr)) &&
-	    (curr->nr_cpus_allowed < 2 ||
+	    ((curr->nr_cpus_allowed < 2) ||
 	     !dl_entity_preempt(&p->dl, &curr->dl)) &&
 	    (p->nr_cpus_allowed > 1)) {
 		int target = find_later_rq(p);
 
-		if (target != -1 &&
+		if ((target != -1) &&
 				(dl_time_before(p->dl.deadline,
 					cpu_rq(target)->dl.earliest_dl.curr) ||
 				(cpu_rq(target)->dl.dl_nr_running == 0)))
@@ -1063,16 +1063,16 @@ static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
 	 * Current can't be migrated, useless to reschedule,
 	 * let's hope p can move out.
 	 */
-	if (rq->curr->nr_cpus_allowed == 1 ||
-	    cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
+	if ((rq->curr->nr_cpus_allowed == 1) ||
+	    (cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1))
 		return;
 
 	/*
 	 * p is migratable, so let's not schedule it and
 	 * see if it is pushed or pulled somewhere else.
 	 */
-	if (p->nr_cpus_allowed != 1 &&
-	    cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
+	if ((p->nr_cpus_allowed != 1) &&
+	    (cpudl_find(&rq->rd->cpudl, p, NULL) != -1))
 		return;
 
 	resched_curr(rq);
@@ -1198,7 +1198,7 @@ static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
 	 * not being the leftmost task anymore. In that case NEED_RESCHED will
 	 * be set and schedule() will start a new hrtick for the next task.
 	 */
-	if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
+	if (hrtick_enabled(rq) && queued && (p->dl.runtime > 0) &&
 	    is_leftmost(p, &rq->dl))
 		start_hrtick_dl(rq, p);
 }
@@ -1328,7 +1328,7 @@ static int find_later_rq(struct task_struct *task)
 			 * If possible, preempting this_cpu is
 			 * cheaper than migrating.
 			 */
-			if (this_cpu != -1 &&
+			if ((this_cpu != -1) &&
 			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
 				rcu_read_unlock();
 				return this_cpu;
@@ -1338,7 +1338,7 @@ static int find_later_rq(struct task_struct *task)
 			 * Last chance: if best_cpu is valid and is
 			 * in the mask, that becomes our choice.
 			 */
-			if (best_cpu < nr_cpu_ids &&
+			if ((best_cpu < nr_cpu_ids) &&
 			    cpumask_test_cpu(best_cpu, sched_domain_span(sd))) {
 				rcu_read_unlock();
 				return best_cpu;
@@ -1390,7 +1390,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
 
 		/* Retry if something changed. */
 		if (double_lock_balance(rq, later_rq)) {
-			if (unlikely(task_rq(task) != rq ||
+			if (unlikely((task_rq(task) != rq) ||
 				     !cpumask_test_cpu(later_rq->cpu,
 						       &task->cpus_allowed) ||
 				     task_running(rq, task) ||
@@ -1471,7 +1471,7 @@ retry:
 	 */
 	if (dl_task(rq->curr) &&
 	    dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
-	    rq->curr->nr_cpus_allowed > 1) {
+	    (rq->curr->nr_cpus_allowed > 1)) {
 		resched_curr(rq);
 		return 0;
 	}
@@ -1490,7 +1490,7 @@ retry:
 		 * then possible that next_task has migrated.
 		 */
 		task = pick_next_pushable_dl_task(rq);
-		if (task_cpu(next_task) == rq->cpu && task == next_task) {
+		if ((task_cpu(next_task) == rq->cpu) && (task == next_task)) {
 			/*
 			 * The task is still there. We don't try
 			 * again, some other cpu will pull it when ready.
@@ -1618,9 +1618,9 @@ static void task_woken_dl(struct rq *rq, struct task_struct *p)
 {
 	if (!task_running(rq, p) &&
 	    !test_tsk_need_resched(rq->curr) &&
-	    p->nr_cpus_allowed > 1 &&
+	    (p->nr_cpus_allowed > 1) &&
 	    dl_task(rq->curr) &&
-	    (rq->curr->nr_cpus_allowed < 2 ||
+	    ((rq->curr->nr_cpus_allowed < 2) ||
 	     !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
 		push_dl_tasks(rq);
 	}
@@ -1722,9 +1722,9 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
 	if (dl_time_before(p->dl.deadline, rq_clock(rq)))
 		setup_new_dl_entity(&p->dl, &p->dl);
 
-	if (task_on_rq_queued(p) && rq->curr != p) {
+	if (task_on_rq_queued(p) && (rq->curr != p)) {
 #ifdef CONFIG_SMP
-		if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
+		if ((p->nr_cpus_allowed > 1) && rq->dl.overloaded)
 			queue_push_tasks(rq);
 #else
 		if (dl_task(rq->curr))
@@ -1742,7 +1742,7 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
 static void prio_changed_dl(struct rq *rq, struct task_struct *p,
 			    int oldprio)
 {
-	if (task_on_rq_queued(p) || rq->curr == p) {
+	if (task_on_rq_queued(p) || (rq->curr == p)) {
#ifdef CONFIG_SMP
 		/*
 		 * This might be too much, but unfortunately
-- 
2.7.4
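
The transformation above is mechanical: each added pair of parentheses wraps a complete comparison that the relational operators already bind more tightly than && and ||, so evaluation is unchanged. A minimal standalone sketch of the before/after shape, using hypothetical stand-in variables rather than the kernel structures:

#include <stdio.h>

/* Hypothetical stand-ins for the fields tested in update_dl_migration(). */
static int dl_nr_migratory = 1;
static unsigned long dl_nr_running = 2;

int main(void)
{
	/* Before: relies on '>' binding tighter than '&&'. */
	int old_style = dl_nr_migratory && dl_nr_running > 1;

	/* After: the comparison is parenthesized explicitly; same value. */
	int new_style = dl_nr_migratory && (dl_nr_running > 1);

	printf("%d %d\n", old_style, new_style);	/* prints "1 1" */
	return 0;
}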