d955526d2e4ddc9a994cf6ac378108c4c8fcc182 sched/fair: add explicit parentheses around comparisons and sub-expressions

 kernel/sched/fair.c | 223 ++++++++++++++++++++++++++-------------------------
 1 file changed, 111 insertions(+), 112 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e7dd0ec..9152500 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -191,7 +191,7 @@ static void __update_inv_weight(struct load_weight *lw)
 
 	w = scale_load_down(lw->weight);
 
-	if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
+	if ((BITS_PER_LONG > 32) && unlikely(w >= WMULT_CONST))
 		lw->inv_weight = 1;
 	else if (unlikely(!w))
 		lw->inv_weight = WMULT_CONST;
@@ -264,7 +264,7 @@ static inline struct task_struct *task_of(struct sched_entity *se)
 
 /* Walk up scheduling entities hierarchy */
 #define for_each_sched_entity(se) \
-		for (; se; se = se->parent)
+		for (; se; (se = se->parent))
 
 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
 {
@@ -451,7 +451,7 @@ static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
 static inline int entity_before(struct sched_entity *a,
 				struct sched_entity *b)
 {
-	return (s64)(a->vruntime - b->vruntime) < 0;
+	return ((s64)(a->vruntime - b->vruntime) < 0);
 }
 
 static void update_min_vruntime(struct cfs_rq *cfs_rq)
@@ -884,7 +884,7 @@ static unsigned int task_nr_scan_windows(struct task_struct *p)
 		rss = nr_scan_pages;
 
 	rss = round_up(rss, nr_scan_pages);
-	return rss / nr_scan_pages;
+	return (rss / nr_scan_pages);
 }
 
 /* For sanitys sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */
@@ -1004,7 +1004,7 @@ static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
 
 static bool numa_is_active_node(int nid, struct numa_group *ng)
 {
-	return group_faults_cpu(ng, nid) * ACTIVE_NODE_FRACTION > ng->max_faults_cpu;
+	return (group_faults_cpu(ng, nid) * ACTIVE_NODE_FRACTION) > ng->max_faults_cpu;
 }
 
 /* Handle placement on systems where not all nodes are directly connected. */
@@ -1033,7 +1033,7 @@ static unsigned long score_nearby_nodes(struct task_struct *p, int nid,
 		 * The furthest away nodes in the system are not interesting
 		 * for placement; nid was already counted.
 		 */
-		if (dist == sched_max_numa_distance || node == nid)
+		if ((dist == sched_max_numa_distance) || (node == nid))
 			continue;
 
 		/*
@@ -1043,8 +1043,8 @@ static unsigned long score_nearby_nodes(struct task_struct *p, int nid,
 		 * "hoplimit", only nodes closer by than "hoplimit" are part
 		 * of each group. Skip other nodes.
 		 */
-		if (sched_numa_topology_type == NUMA_BACKPLANE &&
-					dist > maxdist)
+		if ((sched_numa_topology_type == NUMA_BACKPLANE) &&
+					(dist > maxdist))
 			continue;
 
 		/* Add up the faults from nearby nodes. */
@@ -1094,7 +1094,7 @@ static inline unsigned long task_weight(struct task_struct *p, int nid,
 	faults = task_faults(p, nid);
 	faults += score_nearby_nodes(p, nid, dist, true);
 
-	return 1000 * faults / total_faults;
+	return (1000 * faults / total_faults);
 }
 
 static inline unsigned long group_weight(struct task_struct *p, int nid,
@@ -1113,7 +1113,7 @@ static inline unsigned long group_weight(struct task_struct *p, int nid,
 	faults = group_faults(p, nid);
 	faults += score_nearby_nodes(p, nid, dist, false);
 
-	return 1000 * faults / total_faults;
+	return (1000 * faults / total_faults);
 }
 
 bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
@@ -1144,7 +1144,7 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
 	 */
 	last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
 	if (!cpupid_pid_unset(last_cpupid) &&
-				cpupid_to_nid(last_cpupid) != dst_nid)
+				(cpupid_to_nid(last_cpupid) != dst_nid))
 		return false;
 
 	/* Always allow migrate on private faults */
@@ -1159,8 +1159,8 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
 	 * Destination node is much more heavily used than the source
 	 * node? Allow migration.
 	 */
-	if (group_faults_cpu(ng, dst_nid) > group_faults_cpu(ng, src_nid) *
-					ACTIVE_NODE_FRACTION)
+	if (group_faults_cpu(ng, dst_nid) > (group_faults_cpu(ng, src_nid) *
+					ACTIVE_NODE_FRACTION))
 		return true;
 
 	/*
@@ -1171,8 +1171,8 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
 	 * --------------- * - > ---------------
 	 * faults_mem(dst)   4   faults_mem(src)
 	 */
-	return group_faults_cpu(ng, dst_nid) * group_faults(p, src_nid) * 3 >
-	       group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4;
+	return ((group_faults_cpu(ng, dst_nid) * group_faults(p, src_nid) * 3) >
+	       (group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4));
 }
 
 static unsigned long weighted_cpuload(const int cpu);
@@ -1282,8 +1282,8 @@ static bool load_too_imbalanced(long src_load, long dst_load,
 		swap(dst_load, src_load);
 
 	/* Is the difference below the threshold? */
-	imb = dst_load * src_capacity * 100 -
-	      src_load * dst_capacity * env->imbalance_pct;
+	imb = (dst_load * src_capacity * 100) -
+	      (src_load * dst_capacity * env->imbalance_pct);
 	if (imb <= 0)
 		return false;
 
@@ -1297,8 +1297,8 @@ static bool load_too_imbalanced(long src_load, long dst_load,
 	if (orig_dst_load < orig_src_load)
 		swap(orig_dst_load, orig_src_load);
 
-	old_imb = orig_dst_load * src_capacity * 100 -
-		  orig_src_load * dst_capacity * env->imbalance_pct;
+	old_imb = (orig_dst_load * src_capacity * 100) -
+		  (orig_src_load * dst_capacity * env->imbalance_pct);
 
 	/* Would this change make things worse? */
 	return (imb > old_imb);
@@ -1394,12 +1394,12 @@ static void task_numa_compare(struct task_numa_env *env,
 		}
 	}
 
-	if (imp <= env->best_imp && moveimp <= env->best_imp)
+	if ((imp <= env->best_imp) && (moveimp <= env->best_imp))
 		goto unlock;
 
 	if (!cur) {
 		/* Is there capacity at our destination? */
-		if (env->src_stats.nr_running <= env->src_stats.task_capacity &&
+		if ((env->src_stats.nr_running <= env->src_stats.task_capacity) &&
 		    !env->dst_stats.has_free_capacity)
 			goto unlock;
 
@@ -1407,8 +1407,8 @@ static void task_numa_compare(struct task_numa_env *env,
 	}
 
 	/* Balance doesn't matter much if we're running a task per cpu */
-	if (imp > env->best_imp && src_rq->nr_running == 1 &&
-	    dst_rq->nr_running == 1)
+	if ((imp > env->best_imp) && (src_rq->nr_running == 1) &&
+	    (dst_rq->nr_running == 1))
 		goto assign;
 
 	/*
@@ -1498,9 +1498,8 @@ static bool numa_has_capacity(struct task_numa_env *env)
 	 * --------------------- vs ---------------------
 	 * src->compute_capacity    dst->compute_capacity
 	 */
-	if (src->load * dst->compute_capacity * env->imbalance_pct >
-
-	    dst->load * src->compute_capacity * 100)
+	if ((src->load * dst->compute_capacity * env->imbalance_pct) >
+	    (dst->load * src->compute_capacity * 100))
 		return true;
 
 	return false;
@@ -1570,14 +1569,14 @@ static int task_numa_migrate(struct task_struct *p)
 	 * multiple NUMA nodes; in order to better consolidate the group,
 	 * we need to check other locations.
 	 */
-	if (env.best_cpu == -1 || (p->numa_group && p->numa_group->active_nodes > 1)) {
+	if ((env.best_cpu == -1) || (p->numa_group && (p->numa_group->active_nodes > 1))) {
 		for_each_online_node(nid) {
-			if (nid == env.src_nid || nid == p->numa_preferred_nid)
+			if ((nid == env.src_nid) || (nid == p->numa_preferred_nid))
 				continue;
 
 			dist = node_distance(env.src_nid, env.dst_nid);
-			if (sched_numa_topology_type == NUMA_BACKPLANE &&
-				dist != env.dist) {
+			if ((sched_numa_topology_type == NUMA_BACKPLANE) &&
+				(dist != env.dist)) {
 				taskweight = task_weight(p, env.src_nid, dist);
 				groupweight = group_weight(p, env.src_nid, dist);
 			}
@@ -1585,7 +1584,7 @@ static int task_numa_migrate(struct task_struct *p)
 			/* Only consider nodes where both task and groups benefit */
 			taskimp = task_weight(p, nid, dist) - taskweight;
 			groupimp = group_weight(p, nid, dist) - groupweight;
-			if (taskimp < 0 && groupimp < 0)
+			if ((taskimp < 0) && (groupimp < 0))
 				continue;
 
 			env.dist = dist;
@@ -1612,7 +1611,7 @@ static int task_numa_migrate(struct task_struct *p)
 		else
 			nid = env.dst_nid;
 
-		if (ng->active_nodes > 1 && numa_is_active_node(env.dst_nid, ng))
+		if ((ng->active_nodes > 1) && numa_is_active_node(env.dst_nid, ng))
 			sched_setnuma(p, env.dst_nid);
 	}
 
@@ -1646,7 +1645,7 @@ static void numa_migrate_preferred(struct task_struct *p)
 	unsigned long interval = HZ;
 
 	/* This task has no NUMA fault statistics yet */
-	if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults))
+	if (unlikely((p->numa_preferred_nid == -1) || !p->numa_faults))
 		return;
 
 	/* Periodically retry migrating the task to the preferred node */
@@ -1680,7 +1679,7 @@ static void numa_group_count_active_nodes(struct numa_group *numa_group)
 
 	for_each_online_node(nid) {
 		faults = group_faults_cpu(numa_group, nid);
-		if (faults * ACTIVE_NODE_FRACTION > max_faults)
+		if ((faults * ACTIVE_NODE_FRACTION) > max_faults)
 			active_nodes++;
 	}
 
@@ -1721,7 +1720,7 @@ static void update_task_scan_period(struct task_struct *p,
 	 * migration then it implies we are migrating too quickly or the local
 	 * node is overloaded. In either case, scan slower
 	 */
-	if (local + shared == 0 || p->numa_faults_locality[2]) {
+	if ((local + shared == 0) || p->numa_faults_locality[2]) {
 		p->numa_scan_period = min(p->numa_scan_period_max,
 			p->numa_scan_period << 1);
 
@@ -2061,7 +2060,7 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
 	/*
 	 * Tie-break on the grp address.
 	 */
-	if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp)
+	if ((my_grp->nr_tasks == grp->nr_tasks) && (my_grp > grp))
 		goto no_join;
 
 	/* Always join threads in the same process. */
@@ -2086,7 +2085,7 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
 	BUG_ON(irqs_disabled());
 	double_lock_irq(&my_grp->lock, &grp->lock);
 
-	for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
+	for (i = 0; i < (NR_NUMA_HINT_FAULT_STATS * nr_node_ids); i++) {
 		my_grp->faults[i] -= p->numa_faults[i];
 		grp->faults[i] += p->numa_faults[i];
 	}
@@ -2183,7 +2182,7 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
 	 * scan rate to slow down when a workload has settled down.
 	 */
 	ng = p->numa_group;
-	if (!priv && !local && ng && ng->active_nodes > 1 &&
+	if (!priv && !local && ng && (ng->active_nodes > 1) &&
 	    numa_is_active_node(cpu_node, ng) &&
 	    numa_is_active_node(mem_node, ng))
 		local = 1;
@@ -2334,7 +2333,7 @@ void task_numa_work(struct callback_head *work)
 			virtpages -= (end - start) >> PAGE_SHIFT;
 
 			start = end;
-			if (pages <= 0 || virtpages <= 0)
+			if ((pages <= 0) || (virtpages <= 0))
 				goto out;
 
 			cond_resched();
@@ -2377,7 +2376,7 @@ void task_tick_numa(struct rq *rq, struct task_struct *curr)
 	/*
 	 * We don't care about NUMA placement if we don't have memory.
 	 */
-	if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work)
+	if (!curr->mm || (curr->flags & PF_EXITING) || (work->next != work))
 		return;
 
 	/*
@@ -2389,7 +2388,7 @@ void task_tick_numa(struct rq *rq, struct task_struct *curr)
 	now = curr->se.sum_exec_runtime;
 	period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
 
-	if (now > curr->node_stamp + period) {
+	if (now > (curr->node_stamp + period)) {
 		if (!curr->node_stamp)
 			curr->numa_scan_period = task_scan_min(curr);
 		curr->node_stamp += period;
@@ -2559,7 +2558,7 @@ static __always_inline u64 decay_load(u64 val, u64 n)
 
 	if (!n)
 		return val;
-	else if (unlikely(n > LOAD_AVG_PERIOD * 63))
+	else if (unlikely(n > (LOAD_AVG_PERIOD * 63)))
 		return 0;
 
 	/* after bounds checking we can collapse to 32-bit */
@@ -2676,7 +2675,7 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
 
 	/* delta_w is the amount already accumulated against our next period */
 	delta_w = sa->period_contrib;
-	if (delta + delta_w >= 1024) {
+	if ((delta + delta_w) >= 1024) {
 		decayed = 1;
 
 		/* how much left for next period will start over, we don't know yet */
@@ -2870,7 +2869,7 @@ static inline void update_load_avg(struct sched_entity *se, int update_tg)
 	if (update_cfs_rq_load_avg(now, cfs_rq) && update_tg)
 		update_tg_load_avg(cfs_rq, 0);
 
-	if (cpu == smp_processor_id() && &rq->cfs == cfs_rq) {
+	if ((cpu == smp_processor_id()) && (&rq->cfs == cfs_rq)) {
 		unsigned long max = rq->cpu_capacity_orig;
 
 		/*
@@ -2924,8 +2923,8 @@ skip_aging:
 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	__update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq_of(cfs_rq)),
-			  &se->avg, se->on_rq * scale_load_down(se->load.weight),
-			  cfs_rq->curr == se, NULL);
+			  &se->avg, (se->on_rq * scale_load_down(se->load.weight)),
+			  (cfs_rq->curr == se), NULL);
 
 	cfs_rq->avg.load_avg = max_t(long, cfs_rq->avg.load_avg - se->avg.load_avg, 0);
 	cfs_rq->avg.load_sum = max_t(s64, cfs_rq->avg.load_sum - se->avg.load_sum, 0);
@@ -2944,8 +2943,8 @@ enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	migrated = !sa->last_update_time;
 	if (!migrated) {
 		__update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
-			se->on_rq * scale_load_down(se->load.weight),
-			cfs_rq->curr == se, NULL);
+			(se->on_rq * scale_load_down(se->load.weight)),
+			(cfs_rq->curr == se), NULL);
 	}
 
 	decayed = update_cfs_rq_load_avg(now, cfs_rq);
@@ -2967,9 +2966,9 @@ dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	update_load_avg(se, 1);
 
 	cfs_rq->runnable_load_avg =
-		max_t(long, cfs_rq->runnable_load_avg - se->avg.load_avg, 0);
+		max_t(long, (cfs_rq->runnable_load_avg - se->avg.load_avg), 0);
 	cfs_rq->runnable_load_sum =
-		max_t(s64, cfs_rq->runnable_load_sum - se->avg.load_sum, 0);
+		max_t(s64, (cfs_rq->runnable_load_sum - se->avg.load_sum), 0);
 }
 
 #ifndef CONFIG_64BIT
@@ -3077,7 +3076,7 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		se->statistics.sum_sleep_runtime += delta;
 
 		if (tsk) {
-			account_scheduler_latency(tsk, delta >> 10, 1);
+			account_scheduler_latency(tsk, (delta >> 10), 1);
 			trace_sched_stat_sleep(tsk, delta);
 		}
 	}
@@ -3110,9 +3109,9 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 			if (unlikely(prof_on == SLEEP_PROFILING)) {
 				profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk),
-						delta >> 20);
+						(delta >> 20));
 			}
-			account_scheduler_latency(tsk, delta >> 10, 0);
+			account_scheduler_latency(tsk, (delta >> 10), 0);
 		}
 	}
 #endif
@@ -3369,7 +3368,7 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	 * least twice that of our own weight (i.e. dont track it
 	 * when there are only lesser-weight tasks around):
 	 */
-	if (schedstat_enabled() && rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
+	if (schedstat_enabled() && (rq_of(cfs_rq)->load.weight >= 2*se->load.weight)) {
 		se->statistics.slice_max = max(se->statistics.slice_max,
 			se->sum_exec_runtime - se->prev_sum_exec_runtime);
 	}
@@ -3417,20 +3416,20 @@ pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 				second = curr;
 		}
 
-		if (second && wakeup_preempt_entity(second, left) < 1)
+		if (second && (wakeup_preempt_entity(second, left) < 1))
 			se = second;
 	}
 
 	/*
 	 * Prefer last buddy, try to return the CPU to a preempted task.
 	 */
-	if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
+	if (cfs_rq->last && (wakeup_preempt_entity(cfs_rq->last, left) < 1))
 		se = cfs_rq->last;
 
 	/*
 	 * Someone really wants this to run. If it's not unfair, run it.
 	 */
-	if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
+	if (cfs_rq->next && (wakeup_preempt_entity(cfs_rq->next, left) < 1))
 		se = cfs_rq->next;
 
 	clear_buddies(cfs_rq, se);
@@ -3547,7 +3546,7 @@ static inline u64 default_cfs_period(void)
 
 static inline u64 sched_cfs_bandwidth_slice(void)
 {
-	return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
+	return ((u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC);
 }
 
 /*
@@ -3580,7 +3579,7 @@ static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
 	if (unlikely(cfs_rq->throttle_count))
 		return cfs_rq->throttled_clock_task;
 
-	return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
+	return (rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time);
 }
 
 /* returns 0 on failure to allocate runtime */
@@ -3617,7 +3616,7 @@ static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 	if ((s64)(expires - cfs_rq->runtime_expires) > 0)
 		cfs_rq->runtime_expires = expires;
 
-	return cfs_rq->runtime_remaining > 0;
+	return (cfs_rq->runtime_remaining > 0);
 }
 
 /*
@@ -3629,7 +3628,7 @@ static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
 
 	/* if the deadline is ahead of our clock, nothing to do */
-	if (likely((s64)(rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0))
+	if (likely((s64)((rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires)) < 0))
 		return;
 
 	if (cfs_rq->runtime_remaining < 0)
@@ -3683,7 +3682,7 @@ void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
 
 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
 {
-	return cfs_bandwidth_used() && cfs_rq->throttled;
+	return (cfs_bandwidth_used() && cfs_rq->throttled);
 }
 
 /* check whether cfs_rq, or any parent, is throttled */
@@ -3719,8 +3718,8 @@ static int tg_unthrottle_up(struct task_group *tg, void *data)
 #ifdef CONFIG_SMP
 	if (!cfs_rq->throttle_count) {
 		/* adjust cfs_rq_clock_task() */
-		cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
-					     cfs_rq->throttled_clock_task;
+		cfs_rq->throttled_clock_task_time += (rq_clock_task(rq) -
+					     cfs_rq->throttled_clock_task);
 	}
 #endif
 
@@ -3809,7 +3808,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 	update_rq_clock(rq);
 
 	raw_spin_lock(&cfs_b->lock);
-	cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
+	cfs_b->throttled_time += (rq_clock(rq) - cfs_rq->throttled_clock);
 	list_del_rcu(&cfs_rq->throttled_list);
 	raw_spin_unlock(&cfs_b->lock);
 
@@ -3925,7 +3924,7 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
 	 * in us over-using our runtime if it is all used during this loop, but
 	 * only by limited amounts in that extreme case.
 	 */
-	while (throttled && cfs_b->runtime > 0) {
+	while (throttled && (cfs_b->runtime > 0)) {
 		runtime = cfs_b->runtime;
 		raw_spin_unlock(&cfs_b->lock);
 		/* we can't nest cfs_b->lock while distributing bandwidth */
@@ -4006,12 +4005,12 @@ static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 		return;
 
 	raw_spin_lock(&cfs_b->lock);
-	if (cfs_b->quota != RUNTIME_INF &&
-	    cfs_rq->runtime_expires == cfs_b->runtime_expires) {
+	if ((cfs_b->quota != RUNTIME_INF) &&
+	    (cfs_rq->runtime_expires == cfs_b->runtime_expires)) {
 		cfs_b->runtime += slack_runtime;
 
 		/* we are under rq->lock, defer unthrottling using a timer */
-		if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
+		if ((cfs_b->runtime > sched_cfs_bandwidth_slice()) &&
 		    !list_empty(&cfs_b->throttled_cfs_rq))
 			start_cfs_slack_bandwidth(cfs_b);
 	}
@@ -4048,7 +4047,7 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
 		return;
 	}
 
-	if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice)
+	if ((cfs_b->quota != RUNTIME_INF) && (cfs_b->runtime > slice))
 		runtime = cfs_b->runtime;
 
 	expires = cfs_b->runtime_expires;
@@ -4095,7 +4094,7 @@ static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 	if (!cfs_bandwidth_used())
 		return false;
 
-	if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
+	if (likely(!cfs_rq->runtime_enabled || (cfs_rq->runtime_remaining > 0)))
 		return false;
 
 	/*
@@ -4297,7 +4296,7 @@ static void hrtick_update(struct rq *rq)
 {
 	struct task_struct *curr = rq->curr;
 
-	if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
+	if (!hrtick_enabled(rq) || (curr->sched_class != &fair_sched_class))
 		return;
 
 	if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
@@ -4663,7 +4662,7 @@ static unsigned long source_load(int cpu, int type)
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long total = weighted_cpuload(cpu);
 
-	if (type == 0 || !sched_feat(LB_BIAS))
+	if ((type == 0) || !sched_feat(LB_BIAS))
 		return total;
 
 	return min(rq->cpu_load[type-1], total);
@@ -4678,7 +4677,7 @@ static unsigned long target_load(int cpu, int type)
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long total = weighted_cpuload(cpu);
 
-	if (type == 0 || !sched_feat(LB_BIAS))
+	if ((type == 0) || !sched_feat(LB_BIAS))
 		return total;
 
 	return max(rq->cpu_load[type-1], total);
@@ -4713,7 +4712,7 @@ static void record_wakee(struct task_struct *p)
 	 * about the boundary, really active task won't care
	 * about the loss.
 	 */
-	if (time_after(jiffies, current->wakee_flip_decay_ts + HZ)) {
+	if (time_after(jiffies, (current->wakee_flip_decay_ts + HZ))) {
 		current->wakee_flips >>= 1;
 		current->wakee_flip_decay_ts = jiffies;
 	}
@@ -5005,7 +5004,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 		}
 	} while (group = group->next, group != sd->groups);
 
-	if (!idlest || 100*this_load < imbalance*min_load)
+	if (!idlest || ((100*this_load) < imbalance*min_load))
 		return NULL;
 	return idlest;
 }
@@ -5028,7 +5027,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 		if (idle_cpu(i)) {
 			struct rq *rq = cpu_rq(i);
 			struct cpuidle_state *idle = idle_get_state(rq);
-			if (idle && idle->exit_latency < min_exit_latency) {
+			if (idle && (idle->exit_latency < min_exit_latency)) {
 				/*
 				 * We give priority to a CPU whose idle state
 				 * has the smallest exit latency irrespective
@@ -5037,8 +5036,8 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 				min_exit_latency = idle->exit_latency;
 				latest_idle_timestamp = rq->idle_stamp;
 				shallowest_idle_cpu = i;
-			} else if ((!idle || idle->exit_latency == min_exit_latency) &&
-				   rq->idle_stamp > latest_idle_timestamp) {
+			} else if ((!idle || (idle->exit_latency == min_exit_latency)) &&
+				   (rq->idle_stamp > latest_idle_timestamp)) {
 				/*
 				 * If equal or no active idle state, then
 				 * the most recently idled CPU might have
@@ -5049,14 +5048,14 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 			}
 		} else if (shallowest_idle_cpu == -1) {
 			load = weighted_cpuload(i);
-			if (load < min_load || (load == min_load && i == this_cpu)) {
+			if ((load < min_load) || ((load == min_load) && (i == this_cpu))) {
 				min_load = load;
 				least_loaded_cpu = i;
 			}
 		}
 	}
 
-	return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
+	return (shallowest_idle_cpu != -1) ? shallowest_idle_cpu : least_loaded_cpu;
 }
 
 /*
@@ -5074,7 +5073,7 @@ static int select_idle_sibling(struct task_struct *p, int target)
 	/*
 	 * If the prevous cpu is cache affine and idle, don't be stupid.
 	 */
-	if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
+	if ((i != target && cpus_share_cache(i, target)) && idle_cpu(i))
 		return i;
 
 	/*
@@ -5102,7 +5101,7 @@ static int select_idle_sibling(struct task_struct *p, int target)
 
 			/* Ensure the entire group is idle */
 			for_each_cpu(i, sched_group_cpus(sg)) {
-				if (i == target || !idle_cpu(i))
+				if ((i == target) || !idle_cpu(i))
 					goto next;
 			}
 
@@ -5226,7 +5225,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
 		}
 
 		new_cpu = find_idlest_cpu(group, p, cpu);
-		if (new_cpu == -1 || new_cpu == cpu) {
+		if ((new_cpu == -1) || (new_cpu == cpu)) {
 			/* Now try balancing at a lower domain level of cpu */
 			sd = sd->child;
 			continue;
@@ -5819,7 +5818,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
 
 	delta = rq_clock_task(env->src_rq) - p->se.exec_start;
 
-	return delta < (s64)sysctl_sched_migration_cost;
+	return (delta < (s64)sysctl_sched_migration_cost);
 }
 
 #ifdef CONFIG_NUMA_BALANCING
@@ -5866,7 +5865,7 @@ static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
 		dst_faults = task_faults(p, dst_nid);
 	}
 
-	return dst_faults < src_faults;
+	return (dst_faults < src_faults);
 }
 
 #else
@@ -6025,7 +6024,7 @@ static int detach_tasks(struct lb_env *env)
 		 * We don't want to steal all, otherwise we may be treated likewise,
 		 * which could at worst lead to a livelock crash.
 		 */
-		if (env->idle != CPU_NOT_IDLE && env->src_rq->nr_running <= 1)
+		if ((env->idle != CPU_NOT_IDLE) && (env->src_rq->nr_running <= 1))
 			break;
 
 		p = list_first_entry(tasks, struct task_struct, se.group_node);
@@ -6047,7 +6046,7 @@ static int detach_tasks(struct lb_env *env)
 
 		load = task_h_load(p);
 
-		if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
+		if (sched_feat(LB_MIN) && (load < 16) && !env->sd->nr_balance_failed)
 			goto next;
 
 		if ((load / 2) > env->imbalance)
@@ -6341,7 +6340,7 @@ static unsigned long scale_rt_capacity(int cpu)
 	used = div_u64(avg, total);
 
 	if (likely(used < SCHED_CAPACITY_SCALE))
-		return SCHED_CAPACITY_SCALE - used;
+		return (SCHED_CAPACITY_SCALE - used);
 
 	return 1;
 }
@@ -6901,8 +6900,8 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
 		/*
 		 * If there aren't any idle cpus, avoid creating some.
 		 */
-		if (busiest->group_type == group_overloaded &&
-		    local->group_type == group_overloaded) {
+		if ((busiest->group_type == group_overloaded) &&
+		    (local->group_type == group_overloaded)) {
 			load_above_capacity = busiest->sum_nr_running *
 						SCHED_LOAD_SCALE;
 			if (load_above_capacity > busiest->group_capacity)
@@ -6919,7 +6918,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
 	 * (so that we can implement power-savings policies etc). Thus we look
 	 * for the minimum possible imbalance.
 	 */
-	max_pull = min(busiest->avg_load - sds->avg_load, load_above_capacity);
+	max_pull = min((busiest->avg_load - sds->avg_load), load_above_capacity);
 
 	/* How much load to actually move to equalise the imbalance */
 	env->imbalance = min(
@@ -6972,12 +6971,12 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
 	busiest = &sds.busiest_stat;
 
 	/* ASYM feature bypasses nice load balance check */
-	if ((env->idle == CPU_IDLE || env->idle == CPU_NEWLY_IDLE) &&
+	if (((env->idle == CPU_IDLE) || (env->idle == CPU_NEWLY_IDLE)) &&
 	    check_asym_packing(env, &sds))
 		return sds.busiest;
 
 	/* There is no busy sibling group to pull tasks from */
-	if (!sds.busiest || busiest->sum_nr_running == 0)
+	if (!sds.busiest || (busiest->sum_nr_running == 0))
 		goto out_balanced;
 
 	sds.avg_load = (SCHED_CAPACITY_SCALE * sds.total_load)
@@ -6992,7 +6991,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
 		goto force_balance;
 
 	/* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
-	if (env->idle == CPU_NEWLY_IDLE && group_has_capacity(env, local) &&
+	if ((env->idle == CPU_NEWLY_IDLE) && group_has_capacity(env, local) &&
 	    busiest->group_no_capacity)
 		goto force_balance;
 
@@ -7089,7 +7088,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
 		 * which is not scaled with the cpu capacity.
 		 */
-		if (rq->nr_running == 1 && wl > env->imbalance &&
+		if ((rq->nr_running == 1) && (wl > env->imbalance) &&
 		    !check_cpu_capacity(rq, env->sd))
 			continue;
 
@@ -7104,7 +7103,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
 		 * to: wl_i * capacity_j > wl_j * capacity_i;  where j is
 		 * our previous maximum.
 		 */
-		if (wl * busiest_capacity > busiest_load * capacity) {
+		if ((wl * busiest_capacity) > (busiest_load * capacity)) {
 			busiest_load = wl;
 			busiest_capacity = capacity;
 			busiest = rq;
@@ -7134,7 +7133,7 @@ static int need_active_balance(struct lb_env *env)
 		 * higher numbered CPUs in order to pack all tasks in the
 		 * lowest numbered CPUs.
 		 */
-		if ((sd->flags & SD_ASYM_PACKING) && env->src_cpu > env->dst_cpu)
+		if ((sd->flags & SD_ASYM_PACKING) && (env->src_cpu > env->dst_cpu))
 			return 1;
 	}
 
@@ -7151,7 +7150,7 @@ static int need_active_balance(struct lb_env *env)
 		return 1;
 	}
 
-	return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
+	return unlikely(sd->nr_balance_failed > (sd->cache_nice_tries+2));
 }
 
 static int active_load_balance_cpu_stop(void *data);
@@ -7314,7 +7313,7 @@ more_balance:
 		 * moreover subsequent load balance cycles should correct the
 		 * excess load moved.
 		 */
-		if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) {
+		if ((env.flags & LBF_DST_PINNED) && (env.imbalance > 0)) {
 
 			/* Prevent to re-select dst_cpu via env's cpus */
 			cpumask_clear_cpu(env.dst_cpu, env.cpus);
@@ -7338,7 +7337,7 @@ more_balance:
 		if (sd_parent) {
 			int *group_imbalance = &sd_parent->groups->sgc->imbalance;
 
-			if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0)
+			if ((env.flags & LBF_SOME_PINNED) && (env.imbalance > 0))
 				*group_imbalance = 1;
 		}
 
@@ -7448,7 +7447,7 @@ out_all_pinned:
 out_one_pinned:
 	/* tune up the balancing interval */
 	if (((env.flags & LBF_ALL_PINNED) &&
-			sd->balance_interval < MAX_PINNED_INTERVAL) ||
+			(sd->balance_interval < MAX_PINNED_INTERVAL)) ||
 			(sd->balance_interval < sd->max_interval))
 		sd->balance_interval *= 2;
 
@@ -7524,7 +7523,7 @@ static int idle_balance(struct rq *this_rq)
 		if (!(sd->flags & SD_LOAD_BALANCE))
 			continue;
 
-		if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) {
+		if (this_rq->avg_idle < (curr_cost + sd->max_newidle_lb_cost)) {
 			update_next_balance(sd, 0, &next_balance);
 			break;
 		}
@@ -7549,7 +7548,7 @@ static int idle_balance(struct rq *this_rq)
 		 * Stop searching for tasks to pull if there are
 		 * now runnable tasks on this rq.
 		 */
-		if (pulled_task || this_rq->nr_running > 0)
+		if (pulled_task || (this_rq->nr_running > 0))
 			break;
 	}
 	rcu_read_unlock();
@@ -7600,7 +7599,7 @@ static int active_load_balance_cpu_stop(void *data)
 	raw_spin_lock_irq(&busiest_rq->lock);
 
 	/* make sure the requested cpu hasn't gone down in the meantime */
-	if (unlikely(busiest_cpu != smp_processor_id() ||
+	if (unlikely((busiest_cpu != smp_processor_id()) ||
 		     !busiest_rq->active_balance))
 		goto out_unlock;
 
@@ -7676,7 +7675,7 @@ static inline int find_new_ilb(void)
 {
 	int ilb = cpumask_first(nohz.idle_cpus_mask);
 
-	if (ilb < nr_cpu_ids && idle_cpu(ilb))
+	if ((ilb < nr_cpu_ids) && idle_cpu(ilb))
 		return ilb;
 
 	return nr_cpu_ids;
@@ -7856,7 +7855,7 @@ static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
 			break;
 		}
 
-		interval = get_sd_balance_interval(sd, idle != CPU_IDLE);
+		interval = get_sd_balance_interval(sd, (idle != CPU_IDLE));
 
 		need_serialize = sd->flags & SD_SERIALIZE;
 		if (need_serialize) {
@@ -7864,7 +7863,7 @@ static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
 				goto out;
 		}
 
-		if (time_after_eq(jiffies, sd->last_balance + interval)) {
+		if (time_after_eq(jiffies, (sd->last_balance + interval))) {
 			if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
 				/*
 				 * The LBF_DST_PINNED logic could have changed
@@ -7874,12 +7873,12 @@ static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
 				idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
 			}
 			sd->last_balance = jiffies;
-			interval = get_sd_balance_interval(sd, idle != CPU_IDLE);
+			interval = get_sd_balance_interval(sd, (idle != CPU_IDLE));
 		}
 		if (need_serialize)
 			spin_unlock(&balancing);
 out:
-		if (time_after(next_balance, sd->last_balance + interval)) {
+		if (time_after(next_balance, (sd->last_balance + interval))) {
 			next_balance = sd->last_balance + interval;
 			update_next_balance = 1;
 		}
@@ -7931,12 +7930,12 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
 	unsigned long next_balance = jiffies + 60*HZ;
 	int update_next_balance = 0;
 
-	if (idle != CPU_IDLE ||
+	if ((idle != CPU_IDLE) ||
 	    !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
 		goto end;
 
 	for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
-		if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
+		if ((balance_cpu == this_cpu) || !idle_cpu(balance_cpu))
 			continue;
 
 		/*
-- 
2.7.4
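
Reviewer note (not part of the patch): every hunk above only wraps comparisons and arithmetic sub-expressions in parentheses that C's precedence rules already imply, so the change is purely cosmetic and generates identical code. A minimal standalone sketch illustrating this; the variable names and values below are made up for the demonstration and do not come from the kernel:

```c
#include <assert.h>

int main(void)
{
	/* Hypothetical sample values standing in for scheduler variables. */
	long imp = 5, best_imp = 7, moveimp = 3;
	unsigned long faults = 120, total_faults = 400;

	/* Relational operators bind tighter than && and ||, so wrapping each
	 * comparison in parentheses (as in the task_numa_compare() hunks)
	 * leaves the grouping unchanged. */
	assert((imp <= best_imp && moveimp <= best_imp) ==
	       ((imp <= best_imp) && (moveimp <= best_imp)));

	/* '*' and '/' group left to right and bind tighter than '>', so the
	 * task_weight()/group_weight() style change is likewise a no-op. */
	assert((1000 * faults / total_faults) ==
	       ((1000 * faults) / total_faults));

	return 0;
}
```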