7f6350a39a5d1d4b2305b29220fec776cfe2cf56 mm/compaction: parenthesize comparison and shift sub-expressions for readability

 mm/compaction.c | 74 ++++++++++++++++++++++++++++-----------------------------
 1 file changed, 37 insertions(+), 37 deletions(-)

diff --git a/mm/compaction.c b/mm/compaction.c
index f8e925e..c6295fd 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -71,7 +71,7 @@ static void map_pages(struct list_head *list)
 
 static inline bool migrate_async_suitable(int migratetype)
 {
-	return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
+	return is_migrate_cma(migratetype) || (migratetype == MIGRATE_MOVABLE);
 }
 
 #ifdef CONFIG_COMPACTION
@@ -142,8 +142,8 @@ bool compaction_restarting(struct zone *zone, int order)
 	if (order < zone->compact_order_failed)
 		return false;
 
-	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
-		zone->compact_considered >= 1UL << zone->compact_defer_shift;
+	return (zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT) &&
+		(zone->compact_considered >= (1UL << zone->compact_defer_shift));
 }
 
 /* Returns true if the pageblock should be scanned for pages to isolate. */
@@ -239,8 +239,8 @@ static void update_pageblock_skip(struct compact_control *cc,
 	if (migrate_scanner) {
 		if (pfn > zone->compact_cached_migrate_pfn[0])
 			zone->compact_cached_migrate_pfn[0] = pfn;
-		if (cc->mode != MIGRATE_ASYNC &&
-		    pfn > zone->compact_cached_migrate_pfn[1])
+		if ((cc->mode != MIGRATE_ASYNC) &&
+		    (pfn > zone->compact_cached_migrate_pfn[1]))
 			zone->compact_cached_migrate_pfn[1] = pfn;
 	} else {
 		if (pfn < zone->compact_cached_free_pfn)
@@ -367,7 +367,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 	cursor = pfn_to_page(blockpfn);
 
 	/* Isolate free pages. */
-	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
+	for (; (blockpfn < end_pfn); blockpfn++, cursor++) {
 		int isolated, i;
 		struct page *page = cursor;
 
@@ -398,8 +398,8 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 			unsigned int comp_order = compound_order(page);
 
 			if (likely(comp_order < MAX_ORDER)) {
-				blockpfn += (1UL << comp_order) - 1;
-				cursor += (1UL << comp_order) - 1;
+				blockpfn += ((1UL << comp_order) - 1);
+				cursor += ((1UL << comp_order) - 1);
 			}
 
 			goto isolate_fail;
@@ -446,13 +446,13 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 		if (isolated) {
 			cc->nr_freepages += isolated;
 			if (!strict &&
-				cc->nr_migratepages <= cc->nr_freepages) {
+				(cc->nr_migratepages <= cc->nr_freepages)) {
 				blockpfn += isolated;
 				break;
 			}
 
-			blockpfn += isolated - 1;
-			cursor += isolated - 1;
+			blockpfn += (isolated - 1);
+			cursor += (isolated - 1);
 			continue;
 		}
 
@@ -482,7 +482,7 @@ isolate_fail:
 	 * pages requested were isolated. If there were any failures, 0 is
 	 * returned and CMA will fail.
 	 */
-	if (strict && blockpfn < end_pfn)
+	if (strict && (blockpfn < end_pfn))
 		total_isolated = 0;
 
 	if (locked)
@@ -524,7 +524,7 @@ isolate_freepages_range(struct compact_control *cc,
 		block_start_pfn = cc->zone->zone_start_pfn;
 	block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
 
-	for (; pfn < end_pfn; pfn += isolated,
+	for (; (pfn < end_pfn); pfn += isolated,
 				block_start_pfn = block_end_pfn,
 				block_end_pfn += pageblock_nr_pages) {
 		/* Protect pfn from changing by isolate_freepages_block */
@@ -606,7 +606,7 @@ static bool too_many_isolated(struct zone *zone)
 	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
 					zone_page_state(zone, NR_ISOLATED_ANON);
 
-	return isolated > (inactive + active) / 2;
+	return (isolated > (inactive + active) / 2);
 }
 
 /**
@@ -660,7 +660,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 		return 0;
 
 	/* Time to isolate some pages for migration */
-	for (; low_pfn < end_pfn; low_pfn++) {
+	for (; (low_pfn < end_pfn); low_pfn++) {
 		bool is_lru;
 
 		/*
@@ -696,8 +696,8 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 			 * a valid page order. Consider only values in the
 			 * valid order range to prevent low_pfn overflow.
 			 */
-			if (freepage_order > 0 && freepage_order < MAX_ORDER)
-				low_pfn += (1UL << freepage_order) - 1;
+			if ((freepage_order > 0) && (freepage_order < MAX_ORDER))
+				low_pfn += ((1UL << freepage_order) - 1);
 			continue;
 		}
 
@@ -727,7 +727,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 			unsigned int comp_order = compound_order(page);
 
 			if (likely(comp_order < MAX_ORDER))
-				low_pfn += (1UL << comp_order) - 1;
+				low_pfn += ((1UL << comp_order) - 1);
 
 			continue;
 		}
@@ -741,7 +741,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 		 * admittedly racy check.
 		 */
 		if (!page_mapping(page) &&
-		    page_count(page) > page_mapcount(page))
+		    (page_count(page) > page_mapcount(page)))
 			continue;
 
 		/* If we already hold the lock, we can skip some rechecking */
@@ -761,7 +761,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 			 * is safe to read and it's 0 for tail pages.
 			 */
 			if (unlikely(PageCompound(page))) {
-				low_pfn += (1UL << compound_order(page)) - 1;
+				low_pfn += ((1UL << compound_order(page)) - 1);
 				continue;
 			}
 		}
@@ -839,7 +839,7 @@ isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
 		block_start_pfn = cc->zone->zone_start_pfn;
 	block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
 
-	for (; pfn < end_pfn; pfn = block_end_pfn,
+	for (; (pfn < end_pfn); pfn = block_end_pfn,
 				block_start_pfn = block_end_pfn,
 				block_end_pfn += pageblock_nr_pages) {
 
@@ -934,7 +934,7 @@ static void isolate_freepages(struct compact_control *cc)
 	 * pages on cc->migratepages. We stop searching if the migrate
 	 * and free page scanners meet or enough free pages are isolated.
 	 */
-	for (; block_start_pfn >= low_pfn;
+	for (; (block_start_pfn >= low_pfn);
 				block_end_pfn = block_start_pfn,
 				block_start_pfn -= pageblock_nr_pages,
 				isolate_start_pfn = block_start_pfn) {
@@ -1074,7 +1074,7 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
 	struct page *page;
 	const isolate_mode_t isolate_mode =
 		(sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
-		(cc->mode == MIGRATE_ASYNC ? ISOLATE_ASYNC_MIGRATE : 0);
+		((cc->mode == MIGRATE_ASYNC) ? ISOLATE_ASYNC_MIGRATE : 0);
 
 	/*
 	 * Start at where we last stopped, or beginning of the zone as
@@ -1092,7 +1092,7 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
 	 * Iterate over whole pageblocks until we find the first suitable.
 	 * Do not cross the free scanner.
 	 */
-	for (; block_end_pfn <= cc->free_pfn;
+	for (; (block_end_pfn <= cc->free_pfn);
 			low_pfn = block_end_pfn,
 			block_start_pfn = block_end_pfn,
 			block_end_pfn += pageblock_nr_pages) {
@@ -1120,7 +1120,7 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
 		 * Async compaction is optimistic to see if the minimum amount
 		 * of work satisfies the allocation.
 		 */
-		if (cc->mode == MIGRATE_ASYNC &&
+		if ((cc->mode == MIGRATE_ASYNC) &&
 		    !migrate_async_suitable(get_pageblock_migratetype(page)))
 			continue;
 
@@ -1164,7 +1164,7 @@
  */
 static inline bool is_via_compact_memory(int order)
 {
-	return order == -1;
+	return (order == -1);
 }
 
 static int __compact_finished(struct zone *zone, struct compact_control *cc,
@@ -1214,7 +1214,7 @@
 
 #ifdef CONFIG_CMA
 		/* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
-		if (migratetype == MIGRATE_MOVABLE &&
+		if ((migratetype == MIGRATE_MOVABLE) &&
 			!list_empty(&area->free_list[MIGRATE_CMA]))
 			return COMPACT_PARTIAL;
 #endif
@@ -1289,7 +1289,7 @@ static unsigned long __compaction_suitable(struct zone *zone, int order,
 	 * Only compact if a failure would be due to fragmentation.
 	 */
 	fragindex = fragmentation_index(zone, order);
-	if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
+	if ((fragindex >= 0) && (fragindex <= sysctl_extfrag_threshold))
 		return COMPACT_NOT_SUITABLE_ZONE;
 
 	return COMPACT_CONTINUE;
@@ -1342,11 +1342,11 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 	 */
 	cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
 	cc->free_pfn = zone->compact_cached_free_pfn;
-	if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) {
+	if ((cc->free_pfn < start_pfn) || (cc->free_pfn >= end_pfn)) {
 		cc->free_pfn = round_down(end_pfn - 1, pageblock_nr_pages);
 		zone->compact_cached_free_pfn = cc->free_pfn;
 	}
-	if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) {
+	if ((cc->migrate_pfn < start_pfn) || (cc->migrate_pfn >= end_pfn)) {
 		cc->migrate_pfn = start_pfn;
 		zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
 		zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
@@ -1394,7 +1394,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 			 * migrate_pages() may return -ENOMEM when scanners meet
 			 * and we want compact_finished() to detect it
 			 */
-			if (err == -ENOMEM && !compact_scanners_met(cc)) {
+			if ((err == -ENOMEM) && !compact_scanners_met(cc)) {
 				ret = COMPACT_CONTENDED;
 				goto out;
 			}
@@ -1408,7 +1408,7 @@ check_drain:
 		 * compact_finished() can detect immediately if allocation
 		 * would succeed.
 		 */
-		if (cc->order > 0 && cc->last_migrated_pfn) {
+		if ((cc->order > 0) && cc->last_migrated_pfn) {
 			int cpu;
 			unsigned long current_block_start =
 				cc->migrate_pfn & ~((1UL << cc->order) - 1);
@@ -1558,7 +1558,7 @@ unsigned long try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
 			goto break_loop;
 		}
 
-		if (mode != MIGRATE_ASYNC && status == COMPACT_COMPLETE) {
+		if ((mode != MIGRATE_ASYNC) && (status == COMPACT_COMPLETE)) {
 			/*
 			 * We think that allocation won't succeed in this zone
 			 * so we defer compaction there. If it ends up
@@ -1593,7 +1593,7 @@ break_loop:
 	 * If at least one zone wasn't deferred or skipped, we report if all
 	 * zones that were tried were lock contended.
 	 */
-	if (rc > COMPACT_SKIPPED && all_zones_contended)
+	if ((rc > COMPACT_SKIPPED) && all_zones_contended)
 		*contended = COMPACT_CONTENDED_LOCK;
 
 	return rc;
@@ -1709,7 +1709,7 @@ static ssize_t sysfs_compact_node(struct device *dev,
 {
 	int nid = dev->id;
 
-	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
+	if ((nid >= 0) && (nid < nr_node_ids) && node_online(nid)) {
 		/* Flush pending updates to the LRU lists */
 		lru_add_drain_all();
 
@@ -1733,7 +1733,7 @@ void compaction_unregister_node(struct node *node)
 
 static inline bool kcompactd_work_requested(pg_data_t *pgdat)
 {
-	return pgdat->kcompactd_max_order > 0 || kthread_should_stop();
+	return (pgdat->kcompactd_max_order > 0) || kthread_should_stop();
 }
 
 static bool kcompactd_node_suitable(pg_data_t *pgdat)
@@ -1926,7 +1926,7 @@ static int cpu_callback(struct notifier_block *nfb, unsigned long action,
 {
 	int nid;
 
-	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
+	if ((action == CPU_ONLINE) || (action == CPU_ONLINE_FROZEN)) {
 		for_each_node_state(nid, N_MEMORY) {
 			pg_data_t *pgdat = NODE_DATA(nid);
 			const struct cpumask *mask;
-- 
2.7.4