caa2d8f02fd3d68332190431d7d1d82f19ce3147
tcp: parenthesize comparison subexpressions in tcp_ipv4.c

Cosmetic cleanup only: wrap comparison subexpressions of compound
conditions in parentheses for readability. No functional change
intended.

NOTE(review): the original version of this patch rewrote the
tw_ts_recent_stamp test in tcp_twsk_unique() as
"get_seconds() - (tcptw->tw_ts_recent_stamp > 1)", turning
"(a - b) > 1" into "a - (b > 1)" and making the tw_reuse one-second
guard almost always true. Fixed here to the semantics-preserving
"((get_seconds() - tcptw->tw_ts_recent_stamp) > 1)".

 net/ipv4/tcp_ipv4.c | 48 ++++++++++++++++++++++++------------------------
 1 file changed, 24 insertions(+), 24 deletions(-)

diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index ad45050..a51e858 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -122,7 +122,7 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
 	 */
 	if (tcptw->tw_ts_recent_stamp &&
 	    (!twp || (sysctl_tcp_tw_reuse &&
-			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
+			     ((get_seconds() - tcptw->tw_ts_recent_stamp) > 1)))) {
 		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
 		if (tp->write_seq == 0)
 			tp->write_seq = 1;
@@ -281,12 +281,12 @@ void tcp_v4_mtu_reduced(struct sock *sk)
 	/* Something is about to be wrong... Remember soft error
 	 * for the case, if this connection will not able to recover.
 	 */
-	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
+	if ((mtu < dst_mtu(dst)) && ip_dont_fragment(sk, dst))
 		sk->sk_err_soft = EMSGSIZE;
 
 	mtu = dst_mtu(dst);
 
-	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
+	if ((inet->pmtudisc != IP_PMTUDISC_DONT) &&
 	    ip_sk_accept_pmtu(sk) &&
 	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
 		tcp_sync_mss(sk, mtu);
@@ -382,11 +382,11 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 	seq = ntohl(th->seq);
 	if (sk->sk_state == TCP_NEW_SYN_RECV)
 		return tcp_req_err(sk, seq,
-				type == ICMP_PARAMETERPROB ||
-				type == ICMP_TIME_EXCEEDED ||
-				(type == ICMP_DEST_UNREACH &&
-				 (code == ICMP_NET_UNREACH ||
-				  code == ICMP_HOST_UNREACH)));
+				(type == ICMP_PARAMETERPROB) ||
+				(type == ICMP_TIME_EXCEEDED) ||
+				((type == ICMP_DEST_UNREACH) &&
+				 ((code == ICMP_NET_UNREACH) ||
+				  (code == ICMP_HOST_UNREACH))));
 
 	bh_lock_sock(sk);
 	/* If too many ICMPs get dropped on busy
@@ -395,7 +395,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 	 * We can receive locally generated ICMP messages while socket is held.
 	 */
 	if (sock_owned_by_user(sk)) {
-		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
+		if (!((type == ICMP_DEST_UNREACH) && (code == ICMP_FRAG_NEEDED)))
 			NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
 	}
 	if (sk->sk_state == TCP_CLOSE)
@@ -411,7 +411,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
 	fastopen = tp->fastopen_rsk;
 	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
-	if (sk->sk_state != TCP_LISTEN &&
+	if ((sk->sk_state != TCP_LISTEN) &&
 	    !between(seq, snd_una, tp->snd_nxt)) {
 		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
 		goto out;
@@ -452,9 +452,9 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 		err = icmp_err_convert[code].errno;
 		/* check if icmp_skb allows revert of backoff
 		 * (see draft-zimmermann-tcp-lcd) */
-		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
+		if ((code != ICMP_NET_UNREACH) && (code != ICMP_HOST_UNREACH))
 			break;
 
-		if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
+		if ((seq != tp->snd_una) || !icsk->icsk_retransmits ||
 		    !icsk->icsk_backoff || fastopen)
 			break;
@@ -604,7 +604,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
 	/* If sk not NULL, it means we did a successful lookup and incoming
 	 * route had to be correct. prequeue might have dropped our dst.
 	 */
-	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
+	if (!sk && (skb_rtable(skb)->rt_type != RTN_LOCAL))
 		return;
 
 	/* Swap the send and the receive. */
@@ -838,7 +838,7 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
 	struct sk_buff *skb;
 
 	/* First, grab a route. */
-	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
+	if (!dst && ((dst = inet_csk_route_req(sk, &fl4, req)) == NULL))
 		return -1;
 
 	skb = tcp_make_synack(sk, dst, req, foc, attach_req);
@@ -1386,7 +1386,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 		sock_rps_save_rxhash(sk, skb);
 		sk_mark_napi_id(sk, skb);
 		if (dst) {
-			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
+			if ((inet_sk(sk)->rx_dst_ifindex != skb->skb_iif) ||
 			    !dst->ops->check(dst, 0)) {
 				dst_release(dst);
 				sk->sk_rx_dst = NULL;
@@ -1471,7 +1471,7 @@ void tcp_v4_early_demux(struct sk_buff *skb)
 			if (dst)
 				dst = dst_check(dst, 0);
 			if (dst &&
-			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
+			    (inet_sk(sk)->rx_dst_ifindex == skb->skb_iif))
 				skb_dst_set_noref(skb, dst);
 		}
 	}
@@ -1491,8 +1491,8 @@ bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
 	if (sysctl_tcp_low_latency || !tp->ucopy.task)
 		return false;
 
-	if (skb->len <= tcp_hdrlen(skb) &&
-	    skb_queue_len(&tp->ucopy.prequeue) == 0)
+	if ((skb->len <= tcp_hdrlen(skb)) &&
+	    (skb_queue_len(&tp->ucopy.prequeue) == 0))
 		return false;
 
 	/* Before escaping RCU protected region, we need to take care of skb
@@ -1919,7 +1919,7 @@ static void *established_get_first(struct seq_file *seq)
 	void *rc = NULL;
 
 	st->offset = 0;
-	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
+	for (; (st->bucket <= tcp_hashinfo.ehash_mask); ++st->bucket) {
 		struct sock *sk;
 		struct hlist_nulls_node *node;
 		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
@@ -1930,7 +1930,7 @@ static void *established_get_first(struct seq_file *seq)
 
 		spin_lock_bh(lock);
 		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
-			if (sk->sk_family != st->family ||
+			if ((sk->sk_family != st->family) ||
 			    !net_eq(sock_net(sk), net)) {
 				continue;
 			}
@@ -2034,7 +2034,7 @@ static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
 	struct tcp_iter_state *st = seq->private;
 	void *rc;
 
-	if (*pos && *pos == st->last_pos) {
+	if (*pos && (*pos == st->last_pos)) {
 		rc = tcp_seek_last_pos(seq);
 		if (rc)
 			goto out;
@@ -2179,9 +2179,9 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
 	int rx_queue;
 	int state;
 
-	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
-	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
-	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
+	if ((icsk->icsk_pending == ICSK_TIME_RETRANS) ||
+	    (icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS) ||
+	    (icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)) {
 		timer_active	= 1;
 		timer_expires	= icsk->icsk_timeout;
 	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
-- 
2.7.4