From af8ab4cf2386ee4357c1bbc371d762e5a159faa6 Mon Sep 17 00:00:00 2001
Subject: [PATCH] mm/page_alloc: add parentheses around comparisons in
 compound conditionals

Wrap the comparison sub-expressions of compound conditionals in
mm/page_alloc.c in explicit parentheses so the intended grouping is
obvious at a glance. No functional change.
---
 mm/page_alloc.c | 86 ++++++++++++++++++++++++++++----------------------------
 1 file changed, 43 insertions(+), 43 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 898fe3f..33d048f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -550,7 +550,7 @@ static int __init debug_guardpage_minorder_setup(char *buf)
 {
 	unsigned long res;
 
-	if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
+	if ((kstrtoul(buf, 10, &res) < 0) || (res > MAX_ORDER / 2)) {
 		pr_err("Bad debug_guardpage_minorder value\n");
 		return 0;
 	}
@@ -633,16 +633,16 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
 	if (!pfn_valid_within(page_to_pfn(buddy)))
 		return 0;
 
-	if (page_is_guard(buddy) && page_order(buddy) == order) {
+	if (page_is_guard(buddy) && (page_order(buddy) == order)) {
 		if (page_zone_id(page) != page_zone_id(buddy))
 			return 0;
 
-		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
+		VM_BUG_ON_PAGE((page_count(buddy) != 0), buddy);
 
 		return 1;
 	}
 
-	if (PageBuddy(buddy) && page_order(buddy) == order) {
+	if (PageBuddy(buddy) && (page_order(buddy) == order)) {
 		/*
 		 * zone check is done late to avoid uselessly
 		 * calculating zone/node ids for pages that could
@@ -651,7 +651,7 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
 		if (page_zone_id(page) != page_zone_id(buddy))
 			return 0;
 
-		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
+		VM_BUG_ON_PAGE((page_count(buddy) != 0), buddy);
 
 		return 1;
 	}
@@ -746,7 +746,7 @@ continue_merging:
 			buddy = page + (buddy_idx - page_idx);
 			buddy_mt = get_pageblock_migratetype(buddy);
 
-			if (migratetype != buddy_mt
+			if ((migratetype != buddy_mt)
 					&& (is_migrate_isolate(migratetype) ||
 						is_migrate_isolate(buddy_mt)))
 				goto done_merging;
@@ -986,7 +986,7 @@ static void init_reserved_page(unsigned long pfn)
 	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
 		struct zone *zone = &pgdat->node_zones[zid];
 
-		if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
+		if ((pfn >= zone->zone_start_pfn) && (pfn < zone_end_pfn(zone)))
 			break;
 	}
 	__init_single_pfn(pfn, zid, nid);
@@ -1008,7 +1008,7 @@ void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
 	unsigned long start_pfn = PFN_DOWN(start);
 	unsigned long end_pfn = PFN_UP(end);
 
-	for (; start_pfn < end_pfn; start_pfn++) {
+	for (; (start_pfn < end_pfn); start_pfn++) {
 		if (pfn_valid(start_pfn)) {
 			struct page *page = pfn_to_page(start_pfn);
 
@@ -1028,7 +1028,7 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
 	int i, bad = 0;
 
 	VM_BUG_ON_PAGE(PageTail(page), page);
-	VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
+	VM_BUG_ON_PAGE((compound && (compound_order(page) != order)), page);
 
 	trace_mm_page_free(page, order);
 	kmemcheck_free_shadow(page, order);
@@ -1124,7 +1124,7 @@ static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node,
 	int nid;
 
 	nid = __early_pfn_to_nid(pfn, state);
-	if (nid >= 0 && nid != node)
+	if ((nid >= 0) && (nid != node))
 		return false;
 	return true;
 }
@@ -1206,7 +1206,7 @@ void set_zone_contiguous(struct zone *zone)
 	unsigned long block_end_pfn;
 
 	block_end_pfn = ALIGN(block_start_pfn + 1, pageblock_nr_pages);
-	for (; block_start_pfn < zone_end_pfn(zone);
+	for (; (block_start_pfn < zone_end_pfn(zone));
 			block_start_pfn = block_end_pfn,
 			 block_end_pfn += pageblock_nr_pages) {
 
@@ -1236,7 +1236,7 @@ static void __init deferred_free_range(struct page *page,
 		return;
 
 	/* Free a large naturally-aligned chunk if possible */
-	if (nr_pages == MAX_ORDER_NR_PAGES &&
+	if ((nr_pages == MAX_ORDER_NR_PAGES) &&
 	    (pfn & (MAX_ORDER_NR_PAGES-1)) == 0) {
 		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
 		__free_pages_boot_core(page, pfn, MAX_ORDER-1);
@@ -1723,9 +1723,9 @@ static bool can_steal_fallback(unsigned int order, int start_mt)
 	if (order >= pageblock_order)
 		return true;
 
-	if (order >= pageblock_order / 2 ||
-		start_mt == MIGRATE_RECLAIMABLE ||
-		start_mt == MIGRATE_UNMOVABLE ||
+	if ((order >= pageblock_order / 2) ||
+		(start_mt == MIGRATE_RECLAIMABLE) ||
+		(start_mt == MIGRATE_UNMOVABLE) ||
 		page_group_by_mobility_disabled)
 		return true;
 
@@ -1822,7 +1822,7 @@ static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
 
 	/* Yoink! */
 	mt = get_pageblock_migratetype(page);
-	if (mt != MIGRATE_HIGHATOMIC &&
+	if ((mt != MIGRATE_HIGHATOMIC) &&
 			!is_migrate_isolate(mt) && !is_migrate_cma(mt)) {
 		zone->nr_reserved_highatomic += pageblock_nr_pages;
 		set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
@@ -2301,7 +2301,7 @@ int __isolate_free_page(struct page *page, unsigned int order)
 	set_page_owner(page, order, __GFP_MOVABLE);
 
 	/* Set the pageblock if the isolated page is at least a pageblock */
-	if (order >= pageblock_order - 1) {
+	if (order >= (pageblock_order - 1)) {
 		struct page *endpage = page + (1 << order) - 1;
 		for (; page < endpage; page += pageblock_nr_pages) {
 			int mt = get_pageblock_migratetype(page);
@@ -2535,7 +2535,7 @@ static bool __zone_watermark_ok(struct zone *z, unsigned int order,
 	 * are not met, then a high-order request also cannot go ahead
 	 * even if a suitable page happened to be free.
 	 */
-	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
+	if (free_pages <= (min + z->lowmem_reserve[classzone_idx]))
 		return false;
 
 	/* If this is an order-0 request then the watermark is fine */
@@ -2590,7 +2590,7 @@ bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
 #ifdef CONFIG_NUMA
 static bool zone_local(struct zone *local_zone, struct zone *zone)
 {
-	return local_zone->node == zone->node;
+	return (local_zone->node == zone->node);
 }
 
 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
@@ -2705,7 +2705,7 @@ zonelist_scan:
 			if (alloc_flags & ALLOC_NO_WATERMARKS)
 				goto try_this_zone;
 
-			if (zone_reclaim_mode == 0 ||
+			if ((zone_reclaim_mode == 0) ||
 			    !zone_allows_reclaim(ac->preferred_zone, zone))
 				continue;
 
@@ -3107,7 +3107,7 @@ bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
 
 static inline bool is_thp_gfp_mask(gfp_t gfp_mask)
 {
-	return (gfp_mask & (GFP_TRANSHUGE | __GFP_KSWAPD_RECLAIM)) == GFP_TRANSHUGE;
+	return ((gfp_mask & (GFP_TRANSHUGE | __GFP_KSWAPD_RECLAIM)) == GFP_TRANSHUGE);
 }
 
 static inline struct page *
@@ -3250,7 +3250,7 @@ retry:
 		 * want to further increase allocation latency, unless it is
 		 * khugepaged trying to collapse.
 		 */
-		if (contended_compaction == COMPACT_CONTENDED_SCHED
+		if ((contended_compaction == COMPACT_CONTENDED_SCHED)
 			&& !(current->flags & PF_KTHREAD))
 			goto nopage;
 	}
@@ -3275,7 +3275,7 @@ retry:
 
 	/* Keep reclaiming pages as long as there is reasonable progress */
 	pages_reclaimed += did_some_progress;
-	if ((did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) ||
+	if ((did_some_progress && (order <= PAGE_ALLOC_COSTLY_ORDER)) ||
 	    ((gfp_mask & __GFP_REPEAT) && pages_reclaimed < (1 << order))) {
 		/* Wait for some write requests to complete then retry */
 		wait_iff_congested(ac->preferred_zone, BLK_RW_ASYNC, HZ/50);
@@ -4097,11 +4097,11 @@ char numa_zonelist_order[16] = "default";
 
 static int __parse_numa_zonelist_order(char *s)
 {
-	if (*s == 'd' || *s == 'D') {
+	if ((*s == 'd') || (*s == 'D')) {
 		user_zonelist_order = ZONELIST_ORDER_DEFAULT;
-	} else if (*s == 'n' || *s == 'N') {
+	} else if ((*s == 'n') || (*s == 'N')) {
 		user_zonelist_order = ZONELIST_ORDER_NODE;
-	} else if (*s == 'z' || *s == 'Z') {
+	} else if ((*s == 'z') || (*s == 'Z')) {
 		user_zonelist_order = ZONELIST_ORDER_ZONE;
 	} else {
 		pr_warn("Ignoring invalid numa_zonelist_order value: %s\n", s);
@@ -4245,7 +4245,7 @@ static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
 	struct zonelist *zonelist;
 
 	zonelist = &pgdat->node_zonelists[0];
-	for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
+	for (j = 0; (zonelist->_zonerefs[j].zone != NULL); j++)
 		;
 	j = build_zonelists_node(NODE_DATA(node), zonelist, j);
 	zonelist->_zonerefs[j].zone = NULL;
@@ -4661,7 +4661,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 	struct memblock_region *r = NULL, *tmp;
 #endif
 
-	if (highest_memmap_pfn < end_pfn - 1)
+	if (highest_memmap_pfn < (end_pfn - 1))
 		highest_memmap_pfn = end_pfn - 1;
 
 	/*
@@ -4693,7 +4693,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 		 * ZONE_MOVABLE not ZONE_NORMAL. skip it.
 		 */
 		if (!mirrored_kernelcore && zone_movable_pfn[nid])
-			if (zone == ZONE_NORMAL && pfn >= zone_movable_pfn[nid])
+			if ((zone == ZONE_NORMAL) && (pfn >= zone_movable_pfn[nid]))
 				continue;
 
 		/*
@@ -4701,14 +4701,14 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 		 * kernel memory layout. If zone==ZONE_MOVABLE but memory is
 		 * mirrored, it's an overlapped memmap init. skip it.
 		 */
-		if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
-			if (!r || pfn >= memblock_region_memory_end_pfn(r)) {
+		if (mirrored_kernelcore && (zone == ZONE_MOVABLE)) {
+			if (!r || (pfn >= memblock_region_memory_end_pfn(r))) {
 				for_each_memblock(memory, tmp)
 					if (pfn < memblock_region_memory_end_pfn(tmp))
 						break;
 				r = tmp;
 			}
-			if (pfn >= memblock_region_memory_base_pfn(r) &&
+			if ((pfn >= memblock_region_memory_base_pfn(r)) &&
 			    memblock_is_mirror(r)) {
 				/* already initialized as NORMAL */
 				pfn = memblock_region_memory_end_pfn(r);
@@ -5004,7 +5004,7 @@ int __meminit __early_pfn_to_nid(unsigned long pfn,
 	unsigned long start_pfn, end_pfn;
 	int nid;
 
-	if (state->last_start <= pfn && pfn < state->last_end)
+	if ((state->last_start <= pfn) && (pfn < state->last_end))
 		return state->last_nid;
 
 	nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
@@ -5164,7 +5164,7 @@ static unsigned long __meminit zone_spanned_pages_in_node(int nid,
 				zone_start_pfn, zone_end_pfn);
 
 	/* Check that this node has pages within the zone's required range */
-	if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
+	if ((*zone_end_pfn < node_start_pfn) || (*zone_start_pfn > node_end_pfn))
 		return 0;
 
 	/* Move the zone boundaries inside the node if necessary */
@@ -5248,11 +5248,11 @@ static unsigned long __meminit zone_absent_pages_in_node(int nid,
 			end_pfn = clamp(memblock_region_memory_end_pfn(r),
 					zone_start_pfn, zone_end_pfn);
 
-			if (zone_type == ZONE_MOVABLE &&
+			if ((zone_type == ZONE_MOVABLE) &&
 			    memblock_is_mirror(r))
 				nr_absent += end_pfn - start_pfn;
 
-			if (zone_type == ZONE_NORMAL &&
+			if ((zone_type == ZONE_NORMAL) &&
 			    !memblock_is_mirror(r))
 				nr_absent += end_pfn - start_pfn;
 		}
@@ -5427,7 +5427,7 @@ static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages,
 	 * populated regions may not naturally algined on page boundary.
 	 * So the (present_pages >> 4) heuristic is a tradeoff for that.
 	 */
-	if (spanned_pages > present_pages + (present_pages >> 4) &&
+	if ((spanned_pages > (present_pages + (present_pages >> 4))) &&
 	    IS_ENABLED(CONFIG_SPARSEMEM))
 		pages = present_pages;
 
@@ -5832,7 +5832,7 @@ static void __init find_zone_movable_pfns_for_nodes(void)
 	 * If kernelcore was not specified or kernelcore size is larger
 	 * than totalpages, there is no ZONE_MOVABLE.
	 */
-	if (!required_kernelcore || required_kernelcore >= totalpages)
+	if (!required_kernelcore || (required_kernelcore >= totalpages))
 		goto out;
 
 	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
@@ -5923,7 +5923,7 @@ restart:
 	 * satisfied
 	 */
 	usable_nodes--;
-	if (usable_nodes && required_kernelcore > usable_nodes)
+	if (usable_nodes && (required_kernelcore > usable_nodes))
 		goto restart;
 
 out2:
@@ -6212,7 +6212,7 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
 {
 	int cpu = (unsigned long)hcpu;
 
-	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
+	if ((action == CPU_DEAD) || (action == CPU_DEAD_FROZEN)) {
 		lru_add_drain_cpu(cpu);
 		drain_pages(cpu);
 
@@ -6595,7 +6595,7 @@ int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write,
 
 	/* Sanity checking to avoid pcp imbalance */
 	if (percpu_pagelist_fraction &&
-	    percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) {
+	    (percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION)) {
 		percpu_pagelist_fraction = old_percpu_pagelist_fraction;
 		ret = -EINVAL;
 		goto out;
@@ -7053,7 +7053,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 	 * So, just fall through. We will check it in test_pages_isolated().
 	 */
 	ret = __alloc_contig_migrate_range(&cc, start, end);
-	if (ret && ret != -EBUSY)
+	if (ret && (ret != -EBUSY))
 		goto done;
 
 	/*
@@ -7095,7 +7095,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 	 * in this case to report failed page properly
 	 * on tracepoint in test_pages_isolated()
 	 */
-	if (outer_start + (1UL << order) <= start)
+	if ((outer_start + (1UL << order)) <= start)
 		outer_start = start;
 	}
 
-- 
2.7.4
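As a side note for reviewers, purely illustrative and not part of the patch: C's relational and equality operators bind tighter than && and ||, so the parentheses added above only spell out the grouping the compiler already applies; there is no behavioural change. A minimal userspace sketch, using made-up values and variable names that merely echo can_steal_fallback(), shows the two spellings are equivalent:

	/* illustration only: compare implicit vs. explicit grouping */
	#include <assert.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int order = 5, pageblock_order = 9;	/* arbitrary values */
		int start_mt_reclaimable = 1, mobility_disabled = 0;

		/* grouping as the compiler parses it, without extra parentheses */
		int implicit = order >= pageblock_order / 2 ||
			       start_mt_reclaimable ||
			       mobility_disabled;

		/* the same condition with the parenthesized style used in the patch */
		int explicit_form = (order >= pageblock_order / 2) ||
				    start_mt_reclaimable ||
				    mobility_disabled;

		assert(implicit == explicit_form);
		printf("identical result: %d\n", implicit);
		return 0;
	}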