3b869ce65024505522f5e24fa696a59b3ab52f02 mm/memcontrol: add explicit parentheses to clarify expression grouping in conditionals mm/memcontrol.c | 56 ++++++++++++++++++++++++++++---------------------------- 1 file changed, 28 insertions(+), 28 deletions(-) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index fe787f5..7251d17 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -95,7 +95,7 @@ int do_swap_account __read_mostly; /* Whether legacy memory+swap accounting is active */ static bool do_memsw_account(void) { - return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && do_swap_account; + return (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && do_swap_account); } static const char * const mem_cgroup_stat_names[] = { @@ -680,7 +680,7 @@ static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg, val = __this_cpu_read(memcg->stat->nr_page_events); next = __this_cpu_read(memcg->stat->targets[target]); /* from time_after() in jiffies.h */ - if ((long)next - (long)val < 0) { + if (((long)next - (long)val) < 0) { switch (target) { case MEM_CGROUP_TARGET_THRESH: next = val + THRESHOLDS_EVENTS_TARGET; @@ -800,7 +800,7 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root, if (prev && !reclaim) pos = prev; - if (!root->use_hierarchy && root != root_mem_cgroup) { + if (!root->use_hierarchy && (root != root_mem_cgroup)) { if (prev) goto out; return root; @@ -814,7 +814,7 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root, mz = mem_cgroup_zone_zoneinfo(root, reclaim->zone); iter = &mz->iter[reclaim->priority]; - if (prev && reclaim->generation != iter->generation) + if (prev && (reclaim->generation != iter->generation)) goto out_unlock; while (1) { @@ -902,7 +902,7 @@ void mem_cgroup_iter_break(struct mem_cgroup *root, { if (!root) root = root_mem_cgroup; - if (prev && prev != root) + if (prev && (prev != root)) css_put(&prev->css); } @@ -1126,7 +1126,7 @@ unlock: static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg) { - if (mc.moving_task && current != mc.moving_task) { + if (mc.moving_task && 
(current != mc.moving_task)) { if (mem_cgroup_under_move(memcg)) { DEFINE_WAIT(wait); prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE); @@ -1185,7 +1185,7 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) pr_cont(":"); for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { - if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) + if ((i == MEM_CGROUP_STAT_SWAP) && !do_swap_account) continue; pr_cont(" %s:%luKB", mem_cgroup_stat_names[i], K(mem_cgroup_read_stat(iter, i))); @@ -1288,10 +1288,10 @@ static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, break; }; points = oom_badness(task, memcg, NULL, totalpages); - if (!points || points < chosen_points) + if (!points || (points < chosen_points)) continue; /* Prefer thread group leaders for display purposes */ - if (points == chosen_points && + if ((points == chosen_points) && thread_group_leader(chosen)) continue; @@ -1735,7 +1735,7 @@ void unlock_page_memcg(struct page *page) { struct mem_cgroup *memcg = page->mem_cgroup; - if (memcg && memcg->move_lock_task == current) { + if (memcg && (memcg->move_lock_task == current)) { unsigned long flags = memcg->move_lock_flags; memcg->move_lock_task = NULL; @@ -1783,7 +1783,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) return ret; stock = &get_cpu_var(memcg_stock); - if (memcg == stock->cached && stock->nr_pages >= nr_pages) { + if ((memcg == stock->cached) && (stock->nr_pages >= nr_pages)) { stock->nr_pages -= nr_pages; ret = true; } @@ -1880,7 +1880,7 @@ static int memcg_cpu_hotplug_callback(struct notifier_block *nb, if (action == CPU_ONLINE) return NOTIFY_OK; - if (action != CPU_DEAD && action != CPU_DEAD_FROZEN) + if ((action != CPU_DEAD) && (action != CPU_DEAD_FROZEN)) return NOTIFY_OK; stock = &per_cpu(memcg_stock, cpu); @@ -1968,7 +1968,7 @@ retry: */ if (unlikely(test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current) || - current->flags & PF_EXITING)) + (current->flags & 
PF_EXITING))) goto force; if (unlikely(task_in_memcg_oom(current))) @@ -2002,7 +2002,7 @@ retry: * unlikely to succeed so close to the limit, and we fall back * to regular pages anyway in case of failure. */ - if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER)) + if (nr_reclaimed && (nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))) goto retry; /* * At task move, charge accounts can be doubly counted. So, it's @@ -2610,8 +2610,8 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, * reclaiming anything. */ if (!nr_reclaimed && - (next_mz == NULL || - loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS)) + ((next_mz == NULL) || + (loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))) break; } while (!nr_reclaimed); if (next_mz) @@ -3139,7 +3139,7 @@ static int memcg_stat_show(struct seq_file *m, void *v) BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS); for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { - if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account()) + if ((i == MEM_CGROUP_STAT_SWAP) && !do_memsw_account()) continue; seq_printf(m, "%s %lu\n", mem_cgroup_stat_names[i], mem_cgroup_read_stat(memcg, i) * PAGE_SIZE); @@ -3168,10 +3168,10 @@ static int memcg_stat_show(struct seq_file *m, void *v) for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { unsigned long long val = 0; - if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account()) + if ((i == MEM_CGROUP_STAT_SWAP) && !do_memsw_account()) continue; for_each_mem_cgroup_tree(mi, memcg) - val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE; + val += (mem_cgroup_read_stat(mi, i) * PAGE_SIZE); seq_printf(m, "total_%s %llu\n", mem_cgroup_stat_names[i], val); } @@ -3274,7 +3274,7 @@ static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) * If none of thresholds below usage is crossed, we read * only one element of the array here. 
*/ - for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--) + for (; ((i >= 0) && unlikely(t->entries[i].threshold > usage)); i--) eventfd_signal(t->entries[i].eventfd, 1); /* i = current_threshold + 1 */ @@ -3286,7 +3286,7 @@ static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) * If none of thresholds above usage is crossed, we read * only one element of the array here. */ - for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++) + for (; ((i < t->size) && unlikely(t->entries[i].threshold <= usage)); i++) eventfd_signal(t->entries[i].eventfd, 1); /* Update current_threshold */ @@ -4088,7 +4088,7 @@ static struct mem_cgroup *mem_cgroup_alloc(void) int node; size = sizeof(struct mem_cgroup); - size += nr_node_ids * sizeof(struct mem_cgroup_per_node *); + size += (nr_node_ids * sizeof(struct mem_cgroup_per_node *)); memcg = kzalloc(size, GFP_KERNEL); if (!memcg) @@ -4528,7 +4528,7 @@ static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma, } /* There is a swap entry and a page doesn't exist or isn't charged */ if (ent.val && !ret && - mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) { + (mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent))) { ret = MC_TARGET_SWAP; if (target) target->ent = ent; @@ -4588,7 +4588,7 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd, if (pmd_trans_unstable(pmd)) return 0; pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); - for (; addr != end; pte++, addr += PAGE_SIZE) + for (; (addr != end); pte++, addr += PAGE_SIZE) if (get_mctgt_type(vma, addr, *pte, NULL)) mc.precharge++; /* increment precharge temporarily */ pte_unmap_unlock(pte - 1, ptl); @@ -4800,7 +4800,7 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, return 0; retry: pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); - for (; addr != end; addr += PAGE_SIZE) { + for (; (addr != end); addr += PAGE_SIZE) { pte_t ptent = *(pte++); swp_entry_t ent; @@ -5824,7 +5824,7 @@ long 
mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg) if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) return nr_swap_pages; - for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) + for (; (memcg != root_mem_cgroup); memcg = parent_mem_cgroup(memcg)) nr_swap_pages = min_t(long, nr_swap_pages, READ_ONCE(memcg->swap.limit) - page_counter_read(&memcg->swap)); @@ -5846,8 +5846,8 @@ bool mem_cgroup_swap_full(struct page *page) if (!memcg) return false; - for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) - if (page_counter_read(&memcg->swap) * 2 >= memcg->swap.limit) + for (; (memcg != root_mem_cgroup); memcg = parent_mem_cgroup(memcg)) + if ((page_counter_read(&memcg->swap) * 2) >= memcg->swap.limit) return true; return false; -- 2.7.4