3566071cb1e517b39c22dbd1bad42b1cd219d568
mm/slub.c: parenthesize comparison and arithmetic subexpressions

Cosmetic cleanup: add explicit parentheses around the operands of compound
conditionals and a few arithmetic expressions in mm/slub.c. No functional
change.

 mm/slub.c | 82 +++++++++++++++++++++++++++-------------------------------
 1 file changed, 41 insertions(+), 41 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 654db31..41f7bc3 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -305,7 +305,7 @@ static inline int oo_order(struct kmem_cache_order_objects x)
 
 static inline int oo_objects(struct kmem_cache_order_objects x)
 {
-	return x.x & OO_MASK;
+	return (x.x & OO_MASK);
 }
 
 /*
@@ -356,8 +356,8 @@ static bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 #endif
 	{
 		slab_lock(page);
-		if (page->freelist == freelist_old &&
-					page->counters == counters_old) {
+		if ((page->freelist == freelist_old) &&
+					(page->counters == counters_old)) {
 			page->freelist = freelist_new;
 			set_page_slub_counters(page, counters_new);
 			slab_unlock(page);
@@ -395,8 +395,8 @@ static bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 
 		local_irq_save(flags);
 		slab_lock(page);
-		if (page->freelist == freelist_old &&
-					page->counters == counters_old) {
+		if ((page->freelist == freelist_old) &&
+					(page->counters == counters_old)) {
 			page->freelist = freelist_new;
 			set_page_slub_counters(page, counters_new);
 			slab_unlock(page);
@@ -494,7 +494,7 @@ static int check_valid_pointer(struct kmem_cache *s,
 
 	base = page_address(page);
 	object = restore_red_left(s, object);
-	if (object < base || object >= base + page->objects * s->size ||
+	if ((object < base) || (object >= base + page->objects * s->size) ||
 		(object - base) % s->size) {
 		return 0;
 	}
@@ -542,8 +542,8 @@ static void set_track(struct kmem_cache *s, void *object,
 		metadata_access_disable();
 
 		/* See rant in lockdep.c */
-		if (trace.nr_entries != 0 &&
-		    trace.entries[trace.nr_entries - 1] == ULONG_MAX)
+		if ((trace.nr_entries != 0) &&
+		    (trace.entries[trace.nr_entries - 1] == ULONG_MAX))
 			trace.nr_entries--;
 
 		for (i = trace.nr_entries; i < TRACK_ADDRS_COUNT; i++)
@@ -658,7 +658,7 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
 		off = s->inuse;
 
 	if (s->flags & SLAB_STORE_USER)
-		off += 2 * sizeof(struct track);
+		off += (2 * sizeof(struct track));
 
 	if (off != size_from_object(s))
 		/* Beginning of the filler is the free pointer */
@@ -725,7 +725,7 @@ static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
 		return 1;
 
 	end = start + bytes;
-	while (end > fault && end[-1] == value)
+	while ((end > fault) && (end[-1] == value))
 		end--;
 
 	slab_bug(s, "%s overwritten", what);
@@ -785,7 +785,7 @@ static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
 
 	if (s->flags & SLAB_STORE_USER)
 		/* We also have user information there */
-		off += 2 * sizeof(struct track);
+		off += (2 * sizeof(struct track));
 
 	if (size_from_object(s) == off)
 		return 1;
@@ -843,7 +843,7 @@ static int check_object(struct kmem_cache *s, struct page *page,
 			endobject, val, s->inuse - s->object_size))
 			return 0;
 	} else {
-		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
+		if ((s->flags & SLAB_POISON) && (s->object_size < s->inuse)) {
 			check_bytes_and_report(s, page, p, "Alignment padding",
 				endobject, POISON_INUSE, s->inuse - s->object_size);
 		}
@@ -851,7 +851,7 @@ static int check_object(struct kmem_cache *s, struct page *page,
 	}
 
 	if (s->flags & SLAB_POISON) {
-		if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
+		if ((val != SLUB_RED_ACTIVE) && (s->flags & __OBJECT_POISON) &&
 			(!check_bytes_and_report(s, page, p, "Poison", p,
 				POISON_FREE, s->object_size - 1) ||
 			 !check_bytes_and_report(s, page, p, "Poison",
@@ -863,7 +863,7 @@ static int check_object(struct kmem_cache *s, struct page *page,
 		check_pad_bytes(s, page, p);
 	}
 
-	if (!s->offset && val == SLUB_RED_ACTIVE)
+	if (!s->offset && (val == SLUB_RED_ACTIVE))
 		/*
 		 * Object and freepointer overlap. Cannot check
 		 * freepointer while object is allocated.
@@ -923,7 +923,7 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 	int max_objects;
 
 	fp = page->freelist;
-	while (fp && nr <= page->objects) {
+	while (fp && (nr <= page->objects)) {
 		if (fp == search)
 			return 1;
 		if (!check_valid_pointer(s, page, fp)) {
@@ -1187,7 +1187,7 @@ out:
 static int __init setup_slub_debug(char *str)
 {
 	slub_debug = DEBUG_DEFAULT_FLAGS;
-	if (*str++ != '=' || !*str)
+	if ((*str++ != '=') || !*str)
 		/*
 		 * No options specified. Switch on full debugging.
 		 */
@@ -1210,7 +1210,7 @@ static int __init setup_slub_debug(char *str)
 	/*
 	 * Determine which debug features should be switched on
 	 */
-	for (; *str && *str != ','; str++) {
+	for (; (*str) && *str != ','; str++) {
 		switch (tolower(*str)) {
 		case 'f':
 			slub_debug |= SLAB_CONSISTENCY_CHECKS;
@@ -1425,7 +1425,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	 * so we fall-back to the minimum order allocation.
 	 */
 	alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
-	if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min))
+	if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && (oo_order(oo) > oo_order(s->min)))
 		alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
 
 	page = alloc_slab_page(s, alloc_gfp, node, oo);
@@ -1702,7 +1702,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
 			stat(s, CPU_PARTIAL_NODE);
 		}
 		if (!kmem_cache_has_cpu_partial(s)
-			|| available > s->cpu_partial / 2)
+			|| (available > s->cpu_partial / 2))
 			break;
 
 	}
@@ -1755,7 +1755,7 @@ static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
 			n = get_node(s, zone_to_nid(zone));
 
 			if (n && cpuset_zone_allowed(zone, flags) &&
-					n->nr_partial > s->min_partial) {
+					(n->nr_partial > s->min_partial)) {
 				object = get_partial_node(s, n, c, flags);
 				if (object) {
 					/*
@@ -1941,7 +1941,7 @@ redo:
 
 	new.frozen = 0;
 
-	if (!new.inuse && n->nr_partial >= s->min_partial)
+	if (!new.inuse && (n->nr_partial >= s->min_partial))
 		m = M_FREE;
 	else if (new.freelist) {
 		m = M_PARTIAL;
@@ -2052,7 +2052,7 @@ static void unfreeze_partials(struct kmem_cache *s,
 				new.freelist, new.counters,
 				"unfreezing slab"));
 
-		if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) {
+		if (unlikely(!new.inuse && (n->nr_partial >= s->min_partial))) {
 			page->next = discard_page;
 			discard_page = page;
 		} else {
@@ -2100,7 +2100,7 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 		if (oldpage) {
 			pobjects = oldpage->pobjects;
 			pages = oldpage->pages;
-			if (drain && pobjects > s->cpu_partial) {
+			if (drain && (pobjects > s->cpu_partial)) {
 				unsigned long flags;
 				/*
 				 * partial array is full. Move the existing
@@ -2117,7 +2117,7 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 		}
 
 		pages++;
-		pobjects += page->objects - page->inuse;
+		pobjects += (page->objects - page->inuse);
 
 		page->pages = pages;
 		page->pobjects = pobjects;
@@ -2370,7 +2370,7 @@ redo:
 	if (unlikely(!node_match(page, node))) {
 		int searchnode = node;
 
-		if (node != NUMA_NO_NODE && !node_present_pages(node))
+		if ((node != NUMA_NO_NODE) && !node_present_pages(node))
 			searchnode = node_to_mem_node(node);
 
 		if (unlikely(!node_match(page, searchnode))) {
@@ -2720,7 +2720,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		return;
 	}
 
-	if (unlikely(!new.inuse && n->nr_partial >= s->min_partial))
+	if (unlikely(!new.inuse && (n->nr_partial >= s->min_partial)))
 		goto slab_empty;
 
 	/*
@@ -3280,7 +3280,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 	 * end of the object and the free pointer. If not then add an
 	 * additional word to have some bytes to store Redzone information.
 	 */
-	if ((flags & SLAB_RED_ZONE) && size == s->object_size)
+	if ((flags & SLAB_RED_ZONE) && (size == s->object_size))
 		size += sizeof(void *);
 #endif
 
@@ -3310,7 +3310,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 		 * Need to store information about allocs and frees after
 		 * the object.
 		 */
-		size += 2 * sizeof(struct track);
+		size += (2 * sizeof(struct track));
 
 	if (flags & SLAB_RED_ZONE) {
 		/*
@@ -3389,7 +3389,7 @@ static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
 
 #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
     defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
-	if (system_has_cmpxchg_double() && (s->flags & SLAB_NO_CMPXCHG) == 0)
+	if (system_has_cmpxchg_double() && ((s->flags & SLAB_NO_CMPXCHG) == 0))
 		/* Enable fast mode */
 		s->flags |= __CMPXCHG_DOUBLE;
 #endif
@@ -4303,7 +4303,7 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
 	/*
 	 * Not found. Insert new tracking element.
 	 */
-	if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
+	if ((t->count >= t->max) && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
 		return 0;
 
 	l = t->loc + pos;
@@ -4377,7 +4377,7 @@ static int list_locations(struct kmem_cache *s, char *buf,
 	for (i = 0; i < t.count; i++) {
 		struct location *l = &t.loc[i];
 
-		if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100)
+		if (len > (PAGE_SIZE - KSYM_SYMBOL_LEN - 100))
 			break;
 		len += sprintf(buf + len, "%7ld ", l->count);
 
@@ -4402,15 +4402,15 @@ static int list_locations(struct kmem_cache *s, char *buf,
 			len += sprintf(buf + len, " pid=%ld",
 				l->min_pid);
 
-		if (num_online_cpus() > 1 &&
+		if ((num_online_cpus() > 1) &&
 				!cpumask_empty(to_cpumask(l->cpus)) &&
-				len < PAGE_SIZE - 60)
+				(len < (PAGE_SIZE - 60)))
 			len += scnprintf(buf + len, PAGE_SIZE - len - 50,
 					 " cpus=%*pbl",
 					 cpumask_pr_args(to_cpumask(l->cpus)));
 
-		if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
-				len < PAGE_SIZE - 60)
+		if ((nr_online_nodes > 1) && !nodes_empty(l->nodes) &&
+				(len < (PAGE_SIZE - 60)))
 			len += scnprintf(buf + len, PAGE_SIZE - len - 50,
 					 " nodes=%*pbl",
 					 nodemask_pr_args(&l->nodes));
@@ -4431,7 +4431,7 @@ static void __init resiliency_test(void)
 {
 	u8 *p;
 
-	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || KMALLOC_SHIFT_HIGH < 10);
+	BUILD_BUG_ON((KMALLOC_MIN_SIZE > 16) || (KMALLOC_SHIFT_HIGH < 10));
 
 	pr_err("SLUB resiliency testing\n");
 	pr_err("-----------------------\n");
@@ -4453,7 +4453,7 @@ static void __init resiliency_test(void)
 
 	validate_slab_cache(kmalloc_caches[5]);
 	p = kzalloc(64, GFP_KERNEL);
-	p += 64 + (get_cycles() & 0xff) * sizeof(void *);
+	p += (64 + (get_cycles() & 0xff) * sizeof(void *));
 	*p = 0x56;
 	pr_err("\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
 	       p);
@@ -4662,7 +4662,7 @@ static ssize_t order_store(struct kmem_cache *s,
 	if (err)
 		return err;
 
-	if (order > slub_max_order || order < slub_min_order)
+	if ((order > slub_max_order) || (order < slub_min_order))
 		return -EINVAL;
 
 	calculate_sizes(s, order);
@@ -4778,7 +4778,7 @@ static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
 	for_each_online_cpu(cpu) {
 		struct page *page = per_cpu_ptr(s->cpu_slab, cpu) ->partial;
 
-		if (page && len < PAGE_SIZE - 20)
+		if (page && (len < (PAGE_SIZE - 20)))
 			len += sprintf(buf + len, " C%d=%d(%d)", cpu,
 				page->pobjects, page->pages);
 	}
@@ -5062,7 +5062,7 @@ static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
 
 #ifdef CONFIG_SMP
 	for_each_online_cpu(cpu) {
-		if (data[cpu] && len < PAGE_SIZE - 20)
+		if (data[cpu] && (len < (PAGE_SIZE - 20)))
 			len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]);
 	}
 #endif
@@ -5233,7 +5233,7 @@ static ssize_t slab_attr_store(struct kobject *kobj,
 
 	err = attribute->store(s, buf, len);
 #ifdef CONFIG_MEMCG
-	if (slab_state >= FULL && err >= 0 && is_root_cache(s)) {
+	if ((slab_state >= FULL) && (err >= 0) && is_root_cache(s)) {
 		struct kmem_cache *c;
 
 		mutex_lock(&slab_mutex);
-- 
2.7.4