fbf7995dc4b8e1644419ac0fc9285b55c06de348
kernel/sys.c: fix readability issues by parenthesizing compound conditionals

Add explicit parentheses around the relational and equality tests used
inside &&/|| expressions in kernel/sys.c so the grouping is obvious at a
glance. There is no functional change: these operators already bind more
tightly than && and ||.

---
 kernel/sys.c | 52 ++++++++++++++++++++++++++--------------------------
 1 file changed, 26 insertions(+), 26 deletions(-)

diff --git a/kernel/sys.c b/kernel/sys.c
index cf8ba54..3c59333 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -156,7 +156,7 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
 		error = -EPERM;
 		goto out;
 	}
-	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
+	if ((niceval < task_nice(p)) && !can_nice(p, niceval)) {
 		error = -EACCES;
 		goto out;
 	}
@@ -181,7 +181,7 @@ SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
 	struct pid *pgrp;
 	kuid_t uid;
 
-	if (which > PRIO_USER || which < PRIO_PROCESS)
+	if ((which > PRIO_USER) || (which < PRIO_PROCESS))
 		goto out;
 
 	/* normalize: avoid signed division (rounding problems) */
@@ -251,7 +251,7 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who)
 	struct pid *pgrp;
 	kuid_t uid;
 
-	if (which > PRIO_USER || which < PRIO_PROCESS)
+	if ((which > PRIO_USER) || (which < PRIO_PROCESS))
 		return -EINVAL;
 
 	rcu_read_lock();
@@ -433,8 +433,8 @@ static int set_user(struct cred *new)
 	 * for programs doing set*uid()+execve() by harmlessly deferring the
 	 * failure to the execve() stage.
 	 */
-	if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
-			new_user != INIT_USER)
+	if ((atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC)) &&
+			(new_user != INIT_USER))
 		current->flags |= PF_NPROC_EXCEEDED;
 	else
 		current->flags &= ~PF_NPROC_EXCEEDED;
@@ -1100,7 +1100,7 @@ DECLARE_RWSEM(uts_sem);
 
 #ifdef COMPAT_UTS_MACHINE
 #define override_architecture(name) \
-	(personality(current->personality) == PER_LINUX32 && \
+	((personality(current->personality) == PER_LINUX32) && \
 	 copy_to_user(name->machine, COMPAT_UTS_MACHINE, \
 		      sizeof(COMPAT_UTS_MACHINE)))
 #else
@@ -1326,9 +1326,9 @@ SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
 static inline bool rlim64_is_infinity(__u64 rlim64)
 {
 #if BITS_PER_LONG < 64
-	return rlim64 >= ULONG_MAX;
+	return (rlim64 >= ULONG_MAX);
 #else
-	return rlim64 == RLIM64_INFINITY;
+	return (rlim64 == RLIM64_INFINITY);
 #endif
 }
 
@@ -1368,8 +1368,8 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
 	if (new_rlim) {
 		if (new_rlim->rlim_cur > new_rlim->rlim_max)
 			return -EINVAL;
-		if (resource == RLIMIT_NOFILE &&
-				new_rlim->rlim_max > sysctl_nr_open)
+		if ((resource == RLIMIT_NOFILE) &&
+				(new_rlim->rlim_max > sysctl_nr_open))
 			return -EPERM;
 	}
 
@@ -1385,13 +1385,13 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
 	if (new_rlim) {
 		/* Keep the capable check against init_user_ns until
 		   cgroups can contain all limits */
-		if (new_rlim->rlim_max > rlim->rlim_max &&
+		if ((new_rlim->rlim_max > rlim->rlim_max) &&
 				!capable(CAP_SYS_RESOURCE))
 			retval = -EPERM;
 		if (!retval)
 			retval = security_task_setrlimit(tsk->group_leader,
 					resource, new_rlim);
-		if (resource == RLIMIT_CPU && new_rlim->rlim_cur == 0) {
+		if ((resource == RLIMIT_CPU) && (new_rlim->rlim_cur == 0)) {
 			/*
 			 * The caller is asking for an immediate RLIMIT_CPU
 			 * expiry. But we use the zero value to mean "it was
@@ -1415,8 +1415,8 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
 	 * very long-standing error, and fixing it now risks breakage of
 	 * applications, so we live with it
 	 */
-	if (!retval && new_rlim && resource == RLIMIT_CPU &&
-			new_rlim->rlim_cur != RLIM_INFINITY)
+	if (!retval && new_rlim && (resource == RLIMIT_CPU) &&
+			(new_rlim->rlim_cur != RLIM_INFINITY))
 		update_rlimit_cpu(tsk, new_rlim->rlim_cur);
 out:
 	read_unlock(&tasklist_lock);
@@ -1752,8 +1752,8 @@ static int validate_prctl_map(struct prctl_mm_map *prctl_map)
 	for (i = 0; i < ARRAY_SIZE(offsets); i++) {
 		u64 val = *(u64 *)((char *)prctl_map + offsets[i]);
 
-		if ((unsigned long)val >= mmap_max_addr ||
-				(unsigned long)val < mmap_min_addr)
+		if (((unsigned long)val >= mmap_max_addr) ||
+				((unsigned long)val < mmap_min_addr))
 			goto out;
 	}
 
@@ -1777,8 +1777,8 @@ static int validate_prctl_map(struct prctl_mm_map *prctl_map)
 	/*
 	 * @brk should be after @end_data in traditional maps.
 	 */
-	if (prctl_map->start_brk <= prctl_map->end_data ||
-			prctl_map->brk <= prctl_map->end_data)
+	if ((prctl_map->start_brk <= prctl_map->end_data) ||
+			(prctl_map->brk <= prctl_map->end_data))
 		goto out;
 
 	/*
@@ -1793,7 +1793,7 @@ static int validate_prctl_map(struct prctl_mm_map *prctl_map)
 	 * Someone is trying to cheat the auxv vector.
 	 */
 	if (prctl_map->auxv_size) {
-		if (!prctl_map->auxv || prctl_map->auxv_size > sizeof(mm->saved_auxv))
+		if (!prctl_map->auxv || (prctl_map->auxv_size > sizeof(mm->saved_auxv)))
 			goto out;
 	}
 
@@ -1939,13 +1939,13 @@ static int prctl_set_mm(int opt, unsigned long addr,
 	struct vm_area_struct *vma;
 	int error;
 
-	if (arg5 || (arg4 && (opt != PR_SET_MM_AUXV &&
-			      opt != PR_SET_MM_MAP &&
-			      opt != PR_SET_MM_MAP_SIZE)))
+	if (arg5 || (arg4 && ((opt != PR_SET_MM_AUXV) &&
+			      (opt != PR_SET_MM_MAP) &&
+			      (opt != PR_SET_MM_MAP_SIZE))))
 		return -EINVAL;
 
 #ifdef CONFIG_CHECKPOINT_RESTORE
-	if (opt == PR_SET_MM_MAP || opt == PR_SET_MM_MAP_SIZE)
+	if ((opt == PR_SET_MM_MAP) || (opt == PR_SET_MM_MAP_SIZE))
 		return prctl_set_mm_map(opt, (const void __user *)addr, arg4);
 #endif
 
@@ -1958,7 +1958,7 @@ static int prctl_set_mm(int opt, unsigned long addr,
 	if (opt == PR_SET_MM_AUXV)
 		return prctl_set_auxv(mm, addr, arg4);
 
-	if (addr >= TASK_SIZE || addr < mmap_min_addr)
+	if ((addr >= TASK_SIZE) || (addr < mmap_min_addr))
 		return -EINVAL;
 
 	error = -EINVAL;
@@ -2099,7 +2099,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
 		error = get_dumpable(me->mm);
 		break;
 	case PR_SET_DUMPABLE:
-		if (arg2 != SUID_DUMP_DISABLE && arg2 != SUID_DUMP_USER) {
+		if ((arg2 != SUID_DUMP_DISABLE) && (arg2 != SUID_DUMP_USER)) {
 			error = -EINVAL;
 			break;
 		}
@@ -2321,7 +2321,7 @@ static int do_sysinfo(struct sysinfo *info)
 	 */
 
 	mem_total = info->totalram + info->totalswap;
-	if (mem_total < info->totalram || mem_total < info->totalswap)
+	if ((mem_total < info->totalram) || (mem_total < info->totalswap))
 		goto out;
 	bitcount = 0;
 	mem_unit = info->mem_unit;
-- 
2.7.4
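
Note for readers (illustration only, not part of the patch to apply): the
sketch below is a minimal, standalone user-space program showing why the
added parentheses are purely cosmetic. The variable names niceval,
current_nice and can are invented for the example and only loosely echo
set_one_prio(); since relational operators bind more tightly than &&, the
bare and parenthesized forms parse and evaluate identically.

#include <stdio.h>

int main(void)
{
	int niceval = -5, current_nice = 0, can = 0;

	/* '<' binds tighter than '&&': both expressions group as ((a < b) && c) */
	int bare   = niceval < current_nice && !can;
	int parens = (niceval < current_nice) && !can;

	printf("bare=%d parens=%d\n", bare, parens);	/* prints: bare=1 parens=1 */
	return 0;
}

Compiling this with any C compiler and running it prints the same value for
both forms, which is the whole point of the cleanup being purely cosmetic.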