dd413844da674d07f37906501cbf26d861d6489c Issues on kernel/rcu/tree.c file (2)

 kernel/rcu/tree.c | 78 +++++++++++++++++++++++++++----------------------------
 1 file changed, 39 insertions(+), 39 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 64d410c..64424b3 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -230,7 +230,7 @@ unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
  */
 static int rcu_gp_in_progress(struct rcu_state *rsp)
 {
-	return READ_ONCE(rsp->completed) != READ_ONCE(rsp->gpnum);
+	return (READ_ONCE(rsp->completed) != READ_ONCE(rsp->gpnum));
 }
 
 /*
@@ -565,8 +565,8 @@ EXPORT_SYMBOL_GPL(rcutorture_record_progress);
 static int
 cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
 {
-	return &rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL] &&
-	       rdp->nxttail[RCU_DONE_TAIL] != NULL;
+	return (&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]) &&
+	       (rdp->nxttail[RCU_DONE_TAIL] != NULL);
 }
 
 /*
@@ -610,7 +610,7 @@ cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
 	if (*rdp->nxttail[RCU_NEXT_READY_TAIL])
 		return true;  /* Yes, CPU has newly registered callbacks. */
 	for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++)
-		if (rdp->nxttail[i - 1] != rdp->nxttail[i] &&
+		if ((rdp->nxttail[i - 1] != rdp->nxttail[i]) &&
 		    ULONG_CMP_LT(READ_ONCE(rsp->completed),
 				 rdp->nxtcompleted[i]))
 			return true;  /* Yes, CBs for future grace period. */
@@ -652,7 +652,7 @@ static void rcu_eqs_enter_common(long long oldval, bool user)
 	atomic_inc(&rdtp->dynticks);
 	smp_mb__after_atomic(); /* Force ordering with next sojourn. */
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
-		     atomic_read(&rdtp->dynticks) & 0x1);
+		     (atomic_read(&rdtp->dynticks) & 0x1));
 	rcu_dynticks_task_enter();
 
 	/*
@@ -752,7 +752,7 @@ void rcu_irq_exit(void)
 	oldval = rdtp->dynticks_nesting;
 	rdtp->dynticks_nesting--;
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
-		     rdtp->dynticks_nesting < 0);
+		     (rdtp->dynticks_nesting < 0));
 	if (rdtp->dynticks_nesting)
 		trace_rcu_dyntick(TPS("--="), oldval, rdtp->dynticks_nesting);
 	else
@@ -817,7 +817,7 @@ static void rcu_eqs_exit(bool user)
 
 	rdtp = this_cpu_ptr(&rcu_dynticks);
 	oldval = rdtp->dynticks_nesting;
-	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
+	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && (oldval < 0));
 	if (oldval & DYNTICK_TASK_NEST_MASK) {
 		rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
 	} else {
@@ -890,7 +890,7 @@ void rcu_irq_enter(void)
 	oldval = rdtp->dynticks_nesting;
 	rdtp->dynticks_nesting++;
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
-		     rdtp->dynticks_nesting == 0);
+		     (rdtp->dynticks_nesting == 0));
 	if (oldval)
 		trace_rcu_dyntick(TPS("++="), oldval, rdtp->dynticks_nesting);
 	else
@@ -1114,7 +1114,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
 	 * read-side critical section that started before the beginning
 	 * of the current RCU grace period.
 	 */
-	if ((curr & 0x1) == 0 || UINT_CMP_GE(curr, snap + 2)) {
+	if (((curr & 0x1) == 0) || UINT_CMP_GE(curr, snap + 2)) {
 		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
 		rdp->dynticks_fqs++;
 		return 1;
@@ -1202,7 +1202,7 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
  */
 static const char *gp_state_getname(short gs)
 {
-	if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
+	if ((gs < 0) || (gs >= ARRAY_SIZE(gp_state_names)))
 		return "???";
 	return gp_state_names[gs];
 }
@@ -1217,7 +1217,7 @@ static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp)
 
 	j = jiffies;
 	gpa = READ_ONCE(rsp->gp_activity);
-	if (j - gpa > 2 * HZ) {
+	if ((j - gpa) > (2 * HZ)) {
 		pr_err("%s kthread starved for %ld jiffies! g%lu c%lu f%#x %s(%d) ->state=%#lx\n",
 		       rsp->name, j - gpa,
 		       rsp->gpnum, rsp->completed,
@@ -1241,7 +1241,7 @@ static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
 	rcu_for_each_leaf_node(rsp, rnp) {
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		if (rnp->qsmask != 0) {
-			for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
+			for (cpu = 0; cpu <= (rnp->grphi - rnp->grplo); cpu++)
 				if (rnp->qsmask & (1UL << cpu))
 					dump_cpu_task(rnp->grplo + cpu);
 		}
@@ -1269,7 +1269,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
 		return;
 	}
 	WRITE_ONCE(rsp->jiffies_stall,
-		   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
+		   jiffies + (3 * rcu_jiffies_till_stall_check()) + 3);
 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 
 	/*
@@ -1284,7 +1284,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		ndetected += rcu_print_task_stall(rnp);
 		if (rnp->qsmask != 0) {
-			for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
+			for (cpu = 0; cpu <= (rnp->grphi - rnp->grplo); cpu++)
 				if (rnp->qsmask & (1UL << cpu)) {
 					print_cpu_stall_info(rsp,
 							     rnp->grplo + cpu);
@@ -1355,7 +1355,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall)))
 		WRITE_ONCE(rsp->jiffies_stall,
-			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
+			   jiffies + (3 * rcu_jiffies_till_stall_check()) + 3);
 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 
 	/*
@@ -1484,14 +1484,14 @@ static unsigned long rcu_cbs_completed(struct rcu_state *rsp,
 	 * period might have started, but just not yet gotten around
 	 * to initializing the current non-root rcu_node structure.
 	 */
-	if (rcu_get_root(rsp) == rnp && rnp->gpnum == rnp->completed)
-		return rnp->completed + 1;
+	if ((rcu_get_root(rsp) == rnp) && (rnp->gpnum == rnp->completed))
+		return (rnp->completed + 1);
 
 	/*
 	 * Otherwise, wait for a possible partial grace period and
 	 * then the subsequent full grace period.
 	 */
-	return rnp->completed + 2;
+	return (rnp->completed + 2);
 }
 
 /*
@@ -1547,8 +1547,8 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
 	 * incremented.  But that is OK, as it will just result in our
 	 * doing some extra useless work.
 	 */
-	if (rnp->gpnum != rnp->completed ||
-	    READ_ONCE(rnp_root->gpnum) != READ_ONCE(rnp_root->completed)) {
+	if ((rnp->gpnum != rnp->completed) ||
+	    (READ_ONCE(rnp_root->gpnum) != READ_ONCE(rnp_root->completed))) {
 		rnp->need_future_gp[c & 0x1]++;
 		trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleaf"));
 		goto out;
@@ -1629,7 +1629,7 @@ static int rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
  */
 static void rcu_gp_kthread_wake(struct rcu_state *rsp)
 {
-	if (current == rsp->gp_kthread ||
+	if ((current == rsp->gp_kthread) ||
 	    !READ_ONCE(rsp->gp_flags) ||
 	    !rsp->gp_kthread)
 		return;
@@ -1780,7 +1780,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
 		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend"));
 	}
 
-	if (rdp->gpnum != rnp->gpnum || unlikely(READ_ONCE(rdp->gpwrap))) {
+	if ((rdp->gpnum != rnp->gpnum) || unlikely(READ_ONCE(rdp->gpwrap))) {
 		/*
 		 * If the current grace period is waiting for this CPU,
 		 * set up to detect a quiescent state, otherwise don't
@@ -1806,7 +1806,7 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
 	local_irq_save(flags);
 	rnp = rdp->mynode;
 	if ((rdp->gpnum == READ_ONCE(rnp->gpnum) &&
-	     rdp->completed == READ_ONCE(rnp->completed) &&
+	     (rdp->completed == READ_ONCE(rnp->completed)) &&
 	     !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
 	    !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */
 		local_irq_restore(flags);
@@ -2272,7 +2272,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
 
 	/* Walk up the rcu_node hierarchy. */
 	for (;;) {
-		if (!(rnp->qsmask & mask) || rnp->gpnum != gps) {
+		if (!(rnp->qsmask & mask) || (rnp->gpnum != gps)) {
 
 			/*
 			 * Our bit has already been cleared, or the
@@ -2330,8 +2330,8 @@ static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
 	unsigned long mask;
 	struct rcu_node *rnp_p;
 
-	if (rcu_state_p == &rcu_sched_state || rsp != rcu_state_p ||
-	    rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
+	if ((rcu_state_p == &rcu_sched_state) || (rsp != rcu_state_p) ||
+	    (rnp->qsmask != 0) || rcu_preempt_blocked_readers_cgp(rnp)) {
 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		return;  /* Still need more quiescent states! */
 	}
@@ -2369,8 +2369,8 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
 	rnp = rdp->mynode;
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	if ((rdp->cpu_no_qs.b.norm &&
-	     rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) ||
-	    rdp->gpnum != rnp->gpnum || rnp->completed == rnp->gpnum ||
+	     (rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr))) ||
+	    (rdp->gpnum != rnp->gpnum) || (rnp->completed == rnp->gpnum) ||
 	    rdp->gpwrap) {
 
 		/*
@@ -2630,7 +2630,7 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
 	rcu_adopt_orphan_cbs(rsp, flags);
 	raw_spin_unlock_irqrestore(&rsp->orphan_lock, flags);
 
-	WARN_ONCE(rdp->qlen != 0 || rdp->nxtlist != NULL,
+	WARN_ONCE((rdp->qlen != 0) || (rdp->nxtlist != NULL),
 		  "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, nxtlist=%p\n",
 		  cpu, rdp->qlen, rdp->nxtlist);
 }
@@ -2682,7 +2682,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 			count_lazy++;
 		list = next;
 		/* Stop only if limit reached and CPU has something to do. */
-		if (++count >= bl &&
+		if ((++count >= bl) &&
 		    (need_resched() ||
 		     (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
 			break;
@@ -2709,14 +2709,14 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 	rdp->n_cbs_invoked += count;
 
 	/* Reinstate batch limit if we have worked down the excess. */
-	if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark)
+	if ((rdp->blimit == LONG_MAX) && (rdp->qlen <= qlowmark))
 		rdp->blimit = blimit;
 
 	/* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
-	if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) {
+	if ((rdp->qlen == 0) && (rdp->qlen_last_fqs_check != 0)) {
 		rdp->qlen_last_fqs_check = 0;
 		rdp->n_force_qs_snap = rsp->n_force_qs;
-	} else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark)
+	} else if (rdp->qlen < (rdp->qlen_last_fqs_check - qhimark))
 		rdp->qlen_last_fqs_check = rdp->qlen;
 
 	WARN_ON_ONCE((rdp->nxtlist == NULL) != (rdp->qlen == 0));
@@ -2854,7 +2854,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
 
 	/* Funnel through hierarchy to reduce memory contention. */
 	rnp = __this_cpu_read(rsp->rda->mynode);
-	for (; rnp != NULL; rnp = rnp->parent) {
+	for (; (rnp != NULL); rnp = rnp->parent) {
 		ret = (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) ||
 		      !raw_spin_trylock(&rnp->fqslock);
 		if (rnp_old != NULL)
@@ -2982,7 +2982,7 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
 	 * invoking force_quiescent_state() if the newly enqueued callback
 	 * is the only one waiting for a grace period to complete.
 	 */
-	if (unlikely(rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
+	if (unlikely(rdp->qlen > (rdp->qlen_last_fqs_check + qhimark))) {
 
 		/* Are we ignoring a completed grace period? */
 		note_gp_changes(rsp, rdp);
@@ -3048,7 +3048,7 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func,
 	rdp = this_cpu_ptr(rsp->rda);
 
 	/* Add the callback to our list. */
-	if (unlikely(rdp->nxttail[RCU_NEXT_TAIL] == NULL) || cpu != -1) {
+	if (unlikely(rdp->nxttail[RCU_NEXT_TAIL] == NULL) || (cpu != -1)) {
 		int offline;
 
 		if (cpu != -1)
@@ -3889,11 +3889,11 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
 	/* Is the RCU core waiting for a quiescent state from this CPU? */
 	if (rcu_scheduler_fully_active &&
 	    rdp->core_needs_qs && rdp->cpu_no_qs.b.norm &&
-	    rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) {
+	    (rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr))) {
 		rdp->n_rp_core_needs_qs++;
 	} else if (rdp->core_needs_qs &&
 		   (!rdp->cpu_no_qs.b.norm ||
-		    rdp->rcu_qs_ctr_snap != __this_cpu_read(rcu_qs_ctr))) {
+		    (rdp->rcu_qs_ctr_snap != __this_cpu_read(rcu_qs_ctr)))) {
 		rdp->n_rp_report_qs++;
 		return 1;
 	}
@@ -4556,7 +4556,7 @@ static void __init rcu_init_geometry(void)
 	 * The tree must be able to accommodate the configured number of CPUs.
 	 * If this limit is exceeded, fall back to the compile-time values.
 	 */
-	if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
+	if (nr_cpu_ids > (rcu_capacity[RCU_NUM_LVLS - 1])) {
 		rcu_fanout_leaf = RCU_FANOUT_LEAF;
 		WARN_ON(1);
 		return;
-- 
2.7.4