feba625212ad046add3358ad972161e29552fa5b
mm/huge_memory.c: parenthesize sub-expressions for clarity

Add explicit parentheses around comparisons and arithmetic inside
conditionals and assignments so the intended grouping is obvious at a
glance. No functional change. (A standalone before/after illustration
of the pattern follows the diff.)

 mm/huge_memory.c | 88 ++++++++++++++++++++++++++++----------------------------
 1 file changed, 44 insertions(+), 44 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index b49ee12..2b1067c 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -158,8 +158,8 @@ static void set_recommended_min_free_kbytes(void)
          * second to avoid subsequent fallbacks of other types There are 3
          * MIGRATE_TYPES we care about.
          */
-        recommended_min += pageblock_nr_pages * nr_zones *
-                           MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
+        recommended_min += (pageblock_nr_pages * nr_zones *
+                           MIGRATE_PCPTYPES * MIGRATE_PCPTYPES);
 
         /* don't ever allow to reserve more than 5% of the lowmem */
         recommended_min = min(recommended_min,
@@ -245,7 +245,7 @@ static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
                                         struct shrink_control *sc)
 {
         /* we can free zero page only if last reference remains */
-        return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
+        return (atomic_read(&huge_zero_refcount) == 1) ? HPAGE_PMD_NR : 0;
 }
 
 static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
@@ -463,7 +463,7 @@ static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
         int err;
 
         err = kstrtoul(buf, 10, &msecs);
-        if (err || msecs > UINT_MAX)
+        if (err || (msecs > UINT_MAX))
                 return -EINVAL;
 
         khugepaged_scan_sleep_millisecs = msecs;
@@ -490,7 +490,7 @@ static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
         int err;
 
         err = kstrtoul(buf, 10, &msecs);
-        if (err || msecs > UINT_MAX)
+        if (err || (msecs > UINT_MAX))
                 return -EINVAL;
 
         khugepaged_alloc_sleep_millisecs = msecs;
@@ -516,7 +516,7 @@ static ssize_t pages_to_scan_store(struct kobject *kobj,
         unsigned long pages;
 
         err = kstrtoul(buf, 10, &pages);
-        if (err || !pages || pages > UINT_MAX)
+        if (err || !pages || (pages > UINT_MAX))
                 return -EINVAL;
 
         khugepaged_pages_to_scan = pages;
@@ -584,7 +584,7 @@ static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
         unsigned long max_ptes_none;
 
         err = kstrtoul(buf, 10, &max_ptes_none);
-        if (err || max_ptes_none > HPAGE_PMD_NR-1)
+        if (err || (max_ptes_none > (HPAGE_PMD_NR-1)))
                 return -EINVAL;
 
         khugepaged_max_ptes_none = max_ptes_none;
@@ -914,7 +914,7 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
         struct page *page;
         unsigned long haddr = address & HPAGE_PMD_MASK;
 
-        if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
+        if ((haddr < vma->vm_start) || ((haddr + HPAGE_PMD_SIZE) > vma->vm_end))
                 return VM_FAULT_FALLBACK;
         if (unlikely(anon_vma_prepare(vma)))
                 return VM_FAULT_OOM;
@@ -1006,7 +1006,7 @@ int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
         BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
         BUG_ON(!pfn_t_devmap(pfn));
 
-        if (addr < vma->vm_start || addr >= vma->vm_end)
+        if ((addr < vma->vm_start) || (addr >= vma->vm_end))
                 return VM_FAULT_SIGBUS;
         if (track_pfn_insert(vma, &pgprot, pfn))
                 return VM_FAULT_SIGBUS;
@@ -1042,7 +1042,7 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
 
         assert_spin_locked(pmd_lockptr(mm, pmd));
 
-        if (flags & FOLL_WRITE && !pmd_write(*pmd))
+        if ((flags & FOLL_WRITE) && !pmd_write(*pmd))
                 return NULL;
 
         if (pmd_present(*pmd) && pmd_devmap(*pmd))
@@ -1439,7 +1439,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
                  * mlocking read-only mapping shared over fork(). We skip
                  * mlocking such pages.
                  */
-                if (compound_mapcount(page) == 1 && !PageDoubleMap(page) &&
+                if ((compound_mapcount(page) == 1) && !PageDoubleMap(page) &&
                                 page->mapping && trylock_page(page)) {
                         lru_add_drain();
                         if (page->mapping)
@@ -1447,7 +1447,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
                         unlock_page(page);
                 }
         }
-        page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
+        page += ((addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT);
         VM_BUG_ON_PAGE(!PageCompound(page), page);
         if (flags & FOLL_GET)
                 get_page(page);
@@ -1620,7 +1620,7 @@ int madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
          * If user want to discard part-pages of THP, split it so MADV_FREE
          * will deactivate only them.
          */
-        if (next - addr != HPAGE_PMD_SIZE) {
+        if ((next - addr) != HPAGE_PMD_SIZE) {
                 get_page(page);
                 spin_unlock(ptl);
                 if (split_huge_page(page)) {
@@ -1710,7 +1710,7 @@ bool move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
 
         if ((old_addr & ~HPAGE_PMD_MASK) ||
             (new_addr & ~HPAGE_PMD_MASK) ||
-            old_end - old_addr < HPAGE_PMD_SIZE ||
+            ((old_end - old_addr) < HPAGE_PMD_SIZE) ||
             (new_vma->vm_flags & VM_NOHUGEPAGE))
                 return false;
 
@@ -1971,7 +1971,7 @@ void __khugepaged_exit(struct mm_struct *mm)
 
         spin_lock(&khugepaged_mm_lock);
         mm_slot = get_mm_slot(mm);
-        if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
+        if (mm_slot && (khugepaged_scan.mm_slot != mm_slot)) {
                 hash_del(&mm_slot->hash);
                 list_del(&mm_slot->mm_node);
                 free = 1;
@@ -2028,7 +2028,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
                 if (pte_none(pteval) || (pte_present(pteval) &&
                                 is_zero_pfn(pte_pfn(pteval)))) {
                         if (!userfaultfd_armed(vma) &&
-                            ++none_or_zero <= khugepaged_max_ptes_none) {
+                            (++none_or_zero <= khugepaged_max_ptes_none)) {
                                 continue;
                         } else {
                                 result = SCAN_EXCEED_NONE_PTE;
@@ -2065,7 +2065,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
                  * The page must only be referenced by the scanned process
                  * and page swap cache.
                  */
-                if (page_count(page) != 1 + !!PageSwapCache(page)) {
+                if (page_count(page) != (1 + !!PageSwapCache(page))) {
                         unlock_page(page);
                         result = SCAN_PAGE_COUNT;
                         goto out;
@@ -2151,7 +2151,7 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
                 } else {
                         src_page = pte_page(pteval);
                         copy_user_highpage(page, src_page, address, vma);
-                        VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
+                        VM_BUG_ON_PAGE((page_mapcount(src_page) != 1), src_page);
                         release_pte_page(src_page);
                         /*
                          * ptl mostly unnecessary, but preempt has to
@@ -2402,7 +2402,7 @@ static void collapse_huge_page(struct mm_struct *mm,
         }
         hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
         hend = vma->vm_end & HPAGE_PMD_MASK;
-        if (address < hstart || address + HPAGE_PMD_SIZE > hend) {
+        if ((address < hstart) || ((address + HPAGE_PMD_SIZE) > hend)) {
                 result = SCAN_ADDRESS_RANGE;
                 goto out;
         }
@@ -2532,7 +2532,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
                 pte_t pteval = *_pte;
                 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
                         if (!userfaultfd_armed(vma) &&
-                            ++none_or_zero <= khugepaged_max_ptes_none) {
+                            (++none_or_zero <= khugepaged_max_ptes_none)) {
                                 continue;
                         } else {
                                 result = SCAN_EXCEED_NONE_PTE;
@@ -2588,7 +2588,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
                  * The page must only be referenced by the scanned process
                  * and page swap cache.
                  */
-                if (page_count(page) != 1 + !!PageSwapCache(page)) {
+                if (page_count(page) != (1 + !!PageSwapCache(page))) {
                         result = SCAN_PAGE_COUNT;
                         goto out_unmap;
                 }
@@ -2624,7 +2624,7 @@ static void collect_mm_slot(struct mm_slot *mm_slot)
 {
         struct mm_struct *mm = mm_slot->mm;
 
-        VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
+        VM_BUG_ON((NR_CPUS != 1) && !spin_is_locked(&khugepaged_mm_lock));
 
         if (khugepaged_test_exit(mm)) {
                 /* free mm_slot */
@@ -2654,7 +2654,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
         int progress = 0;
 
         VM_BUG_ON(!pages);
-        VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
+        VM_BUG_ON((NR_CPUS != 1) && !spin_is_locked(&khugepaged_mm_lock));
 
         if (khugepaged_scan.mm_slot)
                 mm_slot = khugepaged_scan.mm_slot;
@@ -2703,9 +2703,9 @@ skip:
                         if (unlikely(khugepaged_test_exit(mm)))
                                 goto breakouterloop;
 
-                        VM_BUG_ON(khugepaged_scan.address < hstart ||
-                                  khugepaged_scan.address + HPAGE_PMD_SIZE >
-                                  hend);
+                        VM_BUG_ON((khugepaged_scan.address < hstart) ||
+                                  ((khugepaged_scan.address + HPAGE_PMD_SIZE) >
+                                  hend));
                         ret = khugepaged_scan_pmd(mm, vma,
                                                   khugepaged_scan.address,
                                                   hpage);
@@ -2785,7 +2785,7 @@ static void khugepaged_do_scan(void)
                 if (!khugepaged_scan.mm_slot)
                         pass_through_head++;
                 if (khugepaged_has_work() &&
-                    pass_through_head < 2)
+                    (pass_through_head < 2))
                         progress += khugepaged_scan_mm_slot(pages - progress,
                                                             &hpage);
                 else
@@ -2875,7 +2875,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 
         VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
         VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
-        VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
+        VM_BUG_ON_VMA((vma->vm_end < (haddr + HPAGE_PMD_SIZE)), vma);
         VM_BUG_ON(!pmd_trans_huge(*pmd) && !pmd_devmap(*pmd));
 
         count_vm_event(THP_SPLIT_PMD);
@@ -2932,7 +2932,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
          * Set PG_double_map before dropping compound_mapcount to avoid
          * false-negative page_mapped().
          */
-        if (compound_mapcount(page) > 1 && !TestSetPageDoubleMap(page)) {
+        if ((compound_mapcount(page) > 1) && !TestSetPageDoubleMap(page)) {
                 for (i = 0; i < HPAGE_PMD_NR; i++)
                         atomic_inc(&page[i]._mapcount);
         }
@@ -3045,9 +3045,9 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
          * previously contain an hugepage: check if we need to split
          * an huge pmd.
          */
-        if (start & ~HPAGE_PMD_MASK &&
-            (start & HPAGE_PMD_MASK) >= vma->vm_start &&
-            (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
+        if ((start & ~HPAGE_PMD_MASK) &&
+            ((start & HPAGE_PMD_MASK) >= vma->vm_start) &&
+            (((start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE) <= vma->vm_end))
                 split_huge_pmd_address(vma, start, false, NULL);
 
         /*
@@ -3055,9 +3055,9 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
          * previously contain an hugepage: check if we need to split
          * an huge pmd.
          */
-        if (end & ~HPAGE_PMD_MASK &&
-            (end & HPAGE_PMD_MASK) >= vma->vm_start &&
-            (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
+        if ((end & ~HPAGE_PMD_MASK) &&
+            ((end & HPAGE_PMD_MASK) >= vma->vm_start) &&
+            (((end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE) <= vma->vm_end))
                 split_huge_pmd_address(vma, end, false, NULL);
 
         /*
@@ -3068,10 +3068,10 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
         if (adjust_next > 0) {
                 struct vm_area_struct *next = vma->vm_next;
                 unsigned long nstart = next->vm_start;
-                nstart += adjust_next << PAGE_SHIFT;
-                if (nstart & ~HPAGE_PMD_MASK &&
-                    (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
-                    (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
+                nstart += (adjust_next << PAGE_SHIFT);
+                if ((nstart & ~HPAGE_PMD_MASK) &&
+                    ((nstart & HPAGE_PMD_MASK) >= next->vm_start) &&
+                    (((nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE) <= next->vm_end))
                         split_huge_pmd_address(next, nstart, false, NULL);
         }
 }
@@ -3150,7 +3150,7 @@ static void __split_huge_page_tail(struct page *head, int tail,
                 set_page_idle(page_tail);
 
         /* ->mapping in first tail page is compound_mapcount */
-        VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
+        VM_BUG_ON_PAGE(((tail > 2) && (page_tail->mapping != TAIL_MAPPING)),
                         page_tail);
         page_tail->mapping = head->mapping;
 
@@ -3205,13 +3205,13 @@ int total_mapcount(struct page *page)
         VM_BUG_ON_PAGE(PageTail(page), page);
 
         if (likely(!PageCompound(page)))
-                return atomic_read(&page->_mapcount) + 1;
+                return (atomic_read(&page->_mapcount) + 1);
 
         ret = compound_mapcount(page);
         if (PageHuge(page))
                 return ret;
         for (i = 0; i < HPAGE_PMD_NR; i++)
-                ret += atomic_read(&page[i]._mapcount) + 1;
+                ret += (atomic_read(&page[i]._mapcount) + 1);
         if (PageDoubleMap(page))
                 ret -= HPAGE_PMD_NR;
         return ret;
@@ -3327,7 +3327,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
          * Racy check if we can split the page, before freeze_page() will
          * split PMDs
          */
-        if (total_mapcount(head) != page_count(head) - 1) {
+        if (total_mapcount(head) != (page_count(head) - 1)) {
                 ret = -EBUSY;
                 goto out_unlock;
         }
@@ -3344,7 +3344,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
         spin_lock_irqsave(&pgdata->split_queue_lock, flags);
         count = page_count(head);
         mapcount = total_mapcount(head);
-        if (!mapcount && count == 1) {
+        if (!mapcount && (count == 1)) {
                 if (!list_empty(page_deferred_list(head))) {
                         pgdata->split_queue_len--;
                         list_del(page_deferred_list(head));
--
2.7.4
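
Illustration (not part of the patch): every hunk above wraps a sub-expression in
parentheses that C operator precedence already implies, e.g. '>' binds tighter
than '||' and '&' binds tighter than '&&', so the generated code is unchanged.
The standalone program below is a minimal sketch of the pattern; the validate()
helpers are made up for this example and are not the kernel functions, and it
assumes a build where unsigned long is wider than unsigned int (e.g. 64-bit).

#include <errno.h>
#include <limits.h>
#include <stdio.h>

/* The same range check written both ways; '>' binds tighter than '||',
 * so the added parentheses only spell out the existing grouping. */
static int validate_before(int err, unsigned long msecs)
{
        if (err || msecs > UINT_MAX)
                return -EINVAL;
        return 0;
}

static int validate_after(int err, unsigned long msecs)
{
        if (err || (msecs > UINT_MAX))
                return -EINVAL;
        return 0;
}

int main(void)
{
        /* Hypothetical inputs; UINT_MAX + 1 is genuinely out of range
         * only when unsigned long is wider than unsigned int. */
        unsigned long too_big = (unsigned long)UINT_MAX + 1;

        printf("%d %d\n", validate_before(0, too_big), validate_after(0, too_big));
        printf("%d %d\n", validate_before(0, 1000), validate_after(0, 1000));
        return 0;
}

Both helpers return the same values for every input, which is exactly the claim
made for the kernel hunks: the parenthesized and unparenthesized forms differ
only cosmetically.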