Commit f64f9e71 authored by Joe Perches, committed by David S. Miller

net: Move && and || to end of previous line

Not including net/atm/

Compile tested x86 allyesconfig only
Added a > 80 column line or two, which I ignored.
Existing checkpatch plaints willfully, cheerfully ignored.
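
For instance (foo and bar here are placeholder conditions, not code from
this patch), a test that was previously split as

	if (foo
	    && bar)
		do_work();

is now written with the operator at the end of the line:

	if (foo &&
	    bar)
		do_work();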
Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 152b6a62
@@ -633,8 +633,8 @@ static void p9_poll_mux(struct p9_conn *m)
 	if (n & POLLOUT) {
 		set_bit(Wpending, &m->wsched);
 		P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can write\n", m);
-		if ((m->wsize || !list_empty(&m->unsent_req_list))
-		    && !test_and_set_bit(Wworksched, &m->wsched)) {
+		if ((m->wsize || !list_empty(&m->unsent_req_list)) &&
+		    !test_and_set_bit(Wworksched, &m->wsched)) {
 			P9_DPRINTK(P9_DEBUG_TRANS, "sched write work %p\n", m);
 			queue_work(p9_mux_wq, &m->wq);
 		}
...
@@ -1362,8 +1362,8 @@ static int l2cap_ertm_send(struct sock *sk)
 	if (pi->conn_state & L2CAP_CONN_WAIT_F)
 		return 0;
-	while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))
-	       && !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
+	while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) &&
+	       !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
 		tx_skb = skb_clone(skb, GFP_ATOMIC);
 		if (pi->remote_max_tx &&
@@ -1604,8 +1604,8 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
 		return -EOPNOTSUPP;
 	/* Check outgoing MTU */
-	if (sk->sk_type == SOCK_SEQPACKET && pi->mode == L2CAP_MODE_BASIC
-	    && len > pi->omtu)
+	if (sk->sk_type == SOCK_SEQPACKET && pi->mode == L2CAP_MODE_BASIC &&
+	    len > pi->omtu)
 		return -EINVAL;
 	lock_sock(sk);
@@ -2756,8 +2756,8 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
 		goto unlock;
 	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
-		if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV)
-		    || l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
+		if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
+		    l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
 			l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
 		sk->sk_state = BT_CONNECTED;
@@ -2845,8 +2845,8 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
 	l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
 	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
-		if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV)
-		    || l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
+		if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
+		    l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
 			l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
 		sk->sk_state = BT_CONNECTED;
@@ -3388,8 +3388,8 @@ static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, str
 		pi->expected_ack_seq = tx_seq;
 		l2cap_drop_acked_frames(sk);
-		if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
-		    && (pi->unacked_frames > 0))
+		if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
+		    (pi->unacked_frames > 0))
 			__mod_retrans_timer();
 		l2cap_ertm_send(sk);
...
@@ -60,8 +60,8 @@ static inline unsigned long hold_time(const struct net_bridge *br)
 static inline int has_expired(const struct net_bridge *br,
 				  const struct net_bridge_fdb_entry *fdb)
 {
-	return !fdb->is_static
-		&& time_before_eq(fdb->ageing_timer + hold_time(br), jiffies);
+	return !fdb->is_static &&
+		time_before_eq(fdb->ageing_timer + hold_time(br), jiffies);
 }
 static inline int br_mac_hash(const unsigned char *mac)
...
@@ -316,9 +316,9 @@ static ssize_t store_group_addr(struct device *d,
 	if (new_addr[5] & ~0xf)
 		return -EINVAL;
-	if (new_addr[5] == 1	/* 802.3x Pause address */
-	    || new_addr[5] == 2	/* 802.3ad Slow protocols */
-	    || new_addr[5] == 3)	/* 802.1X PAE address */
+	if (new_addr[5] == 1 ||	/* 802.3x Pause address */
+	    new_addr[5] == 2 ||	/* 802.3ad Slow protocols */
+	    new_addr[5] == 3)	/* 802.1X PAE address */
 		return -EINVAL;
 	spin_lock_bh(&br->lock);
...
@@ -135,8 +135,8 @@ ebt_stp_mt(const struct sk_buff *skb, const struct xt_match_param *par)
 	if (memcmp(sp, header, sizeof(header)))
 		return false;
-	if (info->bitmask & EBT_STP_TYPE
-	    && FWINV(info->type != sp->type, EBT_STP_TYPE))
+	if (info->bitmask & EBT_STP_TYPE &&
+	    FWINV(info->type != sp->type, EBT_STP_TYPE))
 		return false;
 	if (sp->type == BPDU_TYPE_CONFIG &&
...
@@ -375,8 +375,8 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
 		return &d->rx[RX_ALL];
 	/* extra filterlists for the subscription of a single non-RTR can_id */
-	if (((*mask & CAN_EFF_RTR_FLAGS) == CAN_EFF_RTR_FLAGS)
-	    && !(*can_id & CAN_RTR_FLAG)) {
+	if (((*mask & CAN_EFF_RTR_FLAGS) == CAN_EFF_RTR_FLAGS) &&
+	    !(*can_id & CAN_RTR_FLAG)) {
 		if (*can_id & CAN_EFF_FLAG) {
 			if (*mask == (CAN_EFF_MASK | CAN_EFF_RTR_FLAGS)) {
@@ -525,8 +525,8 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
 	 */
 	hlist_for_each_entry_rcu(r, next, rl, list) {
-		if (r->can_id == can_id && r->mask == mask
-		    && r->func == func && r->data == data)
+		if (r->can_id == can_id && r->mask == mask &&
+		    r->func == func && r->data == data)
 			break;
 	}
...
@@ -2677,9 +2677,10 @@ __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 		return GRO_NORMAL;
 	for (p = napi->gro_list; p; p = p->next) {
-		NAPI_GRO_CB(p)->same_flow = (p->dev == skb->dev)
-			&& !compare_ether_header(skb_mac_header(p),
-						 skb_gro_mac_header(skb));
+		NAPI_GRO_CB(p)->same_flow =
+			(p->dev == skb->dev) &&
+			!compare_ether_header(skb_mac_header(p),
+					      skb_gro_mac_header(skb));
 		NAPI_GRO_CB(p)->flush = 0;
 	}
...
@@ -2052,9 +2052,8 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
 				read_lock_bh(&idev->lock);
 				for (ifp = idev->addr_list; ifp;
 				     ifp = ifp->if_next) {
-					if (ifp->scope == IFA_LINK
-					    && !(ifp->
-						 flags & IFA_F_TENTATIVE)) {
+					if (ifp->scope == IFA_LINK &&
+					    !(ifp->flags & IFA_F_TENTATIVE)) {
 						ipv6_addr_copy(&pkt_dev->
 							       cur_in6_saddr,
 							       &ifp->addr);
...
@@ -1085,8 +1085,8 @@ static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlattr **tb,
 	u8 value_byte;
 	u32 value_int;
-	if (!tb[DCB_ATTR_BCN] || !netdev->dcbnl_ops->setbcncfg
-	    || !netdev->dcbnl_ops->setbcnrp)
+	if (!tb[DCB_ATTR_BCN] || !netdev->dcbnl_ops->setbcncfg ||
+	    !netdev->dcbnl_ops->setbcnrp)
 		return ret;
 	ret = nla_parse_nested(data, DCB_BCN_ATTR_MAX,
...
@@ -581,8 +581,9 @@ static int dn_fib_table_insert(struct dn_fib_table *tb, struct rtmsg *r, struct
 		DN_FIB_SCAN_KEY(f, fp, key) {
 			if (fi->fib_priority != DN_FIB_INFO(f)->fib_priority)
 				break;
-			if (f->fn_type == type && f->fn_scope == r->rtm_scope
-			    && DN_FIB_INFO(f) == fi)
+			if (f->fn_type == type &&
+			    f->fn_scope == r->rtm_scope &&
+			    DN_FIB_INFO(f) == fi)
 				goto out;
 		}
...
@@ -1899,8 +1899,9 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
 	err = -EADDRNOTAVAIL;
 	for (pmc=inet->mc_list; pmc; pmc=pmc->next) {
-		if (pmc->multi.imr_multiaddr.s_addr == imr.imr_multiaddr.s_addr
-		    && pmc->multi.imr_ifindex == imr.imr_ifindex)
+		if ((pmc->multi.imr_multiaddr.s_addr ==
+		     imr.imr_multiaddr.s_addr) &&
+		    (pmc->multi.imr_ifindex == imr.imr_ifindex))
 			break;
 	}
 	if (!pmc) {		/* must have a prior join */
...
@@ -155,10 +155,10 @@ static int nf_ip_reroute(struct sk_buff *skb,
 	if (entry->hook == NF_INET_LOCAL_OUT) {
 		const struct iphdr *iph = ip_hdr(skb);
-		if (!(iph->tos == rt_info->tos
-		      && skb->mark == rt_info->mark
-		      && iph->daddr == rt_info->daddr
-		      && iph->saddr == rt_info->saddr))
+		if (!(iph->tos == rt_info->tos &&
+		      skb->mark == rt_info->mark &&
+		      iph->daddr == rt_info->daddr &&
+		      iph->saddr == rt_info->saddr))
 			return ip_route_me_harder(skb, RTN_UNSPEC);
 	}
 	return 0;
...
@@ -1403,8 +1403,8 @@ irnet_connect_indication(void * instance,
   /* Socket already connecting ? On primary ? */
   if(0
 #ifdef ALLOW_SIMULT_CONNECT
-     || ((irttp_is_primary(server->tsap) == 1) /* primary */
-	 && (test_and_clear_bit(0, &new->ttp_connect)))
+     || ((irttp_is_primary(server->tsap) == 1) && /* primary */
+	 (test_and_clear_bit(0, &new->ttp_connect)))
 #endif /* ALLOW_SIMULT_CONNECT */
      )
     {
...
@@ -211,9 +211,9 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
 	 * check if configuration can support the BA policy
 	 * and if buffer size does not exceeds max value */
 	/* XXX: check own ht delayed BA capability?? */
-	if (((ba_policy != 1)
-	     && (!(sta->sta.ht_cap.cap & IEEE80211_HT_CAP_DELAY_BA)))
-	    || (buf_size > IEEE80211_MAX_AMPDU_BUF)) {
+	if (((ba_policy != 1) &&
+	     (!(sta->sta.ht_cap.cap & IEEE80211_HT_CAP_DELAY_BA))) ||
+	    (buf_size > IEEE80211_MAX_AMPDU_BUF)) {
 		status = WLAN_STATUS_INVALID_QOS_PARAM;
 #ifdef CONFIG_MAC80211_HT_DEBUG
 		if (net_ratelimit())
...
@@ -631,8 +631,8 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
 	sta_info_stop(local);
 	rate_control_deinitialize(local);
-	if (skb_queue_len(&local->skb_queue)
-	    || skb_queue_len(&local->skb_queue_unreliable))
+	if (skb_queue_len(&local->skb_queue) ||
+	    skb_queue_len(&local->skb_queue_unreliable))
 		printk(KERN_WARNING "%s: skb_queue not empty\n",
 		       wiphy_name(local->hw.wiphy));
 	skb_queue_purge(&local->skb_queue);
...
@@ -195,8 +195,8 @@ int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr,
 			list_del(&p->list);
 			kmem_cache_free(rm_cache, p);
 			--entries;
-		} else if ((seqnum == p->seqnum)
-			   && (memcmp(sa, p->sa, ETH_ALEN) == 0))
+		} else if ((seqnum == p->seqnum) &&
+			   (memcmp(sa, p->sa, ETH_ALEN) == 0))
 			return -1;
 	}
...
@@ -936,17 +936,16 @@ int mesh_nexthop_lookup(struct sk_buff *skb,
 	}
 	if (mpath->flags & MESH_PATH_ACTIVE) {
-		if (time_after(jiffies, mpath->exp_time +
-			msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time))
-				&& !memcmp(sdata->dev->dev_addr, hdr->addr4,
-					   ETH_ALEN)
-				&& !(mpath->flags & MESH_PATH_RESOLVING)
-				&& !(mpath->flags & MESH_PATH_FIXED)) {
+		if (time_after(jiffies,
+			       mpath->exp_time +
+			       msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) &&
+		    !memcmp(sdata->dev->dev_addr, hdr->addr4, ETH_ALEN) &&
+		    !(mpath->flags & MESH_PATH_RESOLVING) &&
+		    !(mpath->flags & MESH_PATH_FIXED)) {
 			mesh_queue_preq(mpath,
 					PREQ_Q_F_START | PREQ_Q_F_REFRESH);
 		}
-		memcpy(hdr->addr1, mpath->next_hop->sta.addr,
-		       ETH_ALEN);
+		memcpy(hdr->addr1, mpath->next_hop->sta.addr, ETH_ALEN);
 	} else {
 		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 		if (!(mpath->flags & MESH_PATH_RESOLVING)) {
...
@@ -1168,8 +1168,8 @@ ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
 		     rx->key))
 		return -EACCES;
 	/* BIP does not use Protected field, so need to check MMIE */
-	if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb)
-		     && ieee80211_get_mmie_keyidx(rx->skb) < 0 &&
+	if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
+		     ieee80211_get_mmie_keyidx(rx->skb) < 0 &&
 		     rx->key))
 		return -EACCES;
 	/*
...
@@ -366,10 +366,11 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
 	u32 staflags;
-	if (unlikely(!sta || ieee80211_is_probe_resp(hdr->frame_control)
-		     || ieee80211_is_auth(hdr->frame_control)
-		     || ieee80211_is_assoc_resp(hdr->frame_control)
-		     || ieee80211_is_reassoc_resp(hdr->frame_control)))
+	if (unlikely(!sta ||
+		     ieee80211_is_probe_resp(hdr->frame_control) ||
+		     ieee80211_is_auth(hdr->frame_control) ||
+		     ieee80211_is_assoc_resp(hdr->frame_control) ||
+		     ieee80211_is_reassoc_resp(hdr->frame_control)))
 		return TX_CONTINUE;
 	staflags = get_sta_flags(sta);
...
@@ -202,9 +202,9 @@ static inline int expect_clash(const struct nf_conntrack_expect *a,
 static inline int expect_matches(const struct nf_conntrack_expect *a,
 				 const struct nf_conntrack_expect *b)
 {
-	return a->master == b->master && a->class == b->class
-		&& nf_ct_tuple_equal(&a->tuple, &b->tuple)
-		&& nf_ct_tuple_mask_equal(&a->mask, &b->mask);
+	return a->master == b->master && a->class == b->class &&
+	       nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
+	       nf_ct_tuple_mask_equal(&a->mask, &b->mask);
 }
 /* Generally a bad idea to call this: could have matched already. */
...
@@ -243,8 +243,8 @@ static int try_epsv_response(const char *data, size_t dlen,
 	/* Three delimiters. */
 	if (dlen <= 3) return 0;
 	delim = data[0];
-	if (isdigit(delim) || delim < 33 || delim > 126
-	    || data[1] != delim || data[2] != delim)
+	if (isdigit(delim) || delim < 33 || delim > 126 ||
+	    data[1] != delim || data[2] != delim)
 		return 0;
 	return get_port(data, 3, dlen, delim, &cmd->u.tcp.port);
@@ -366,8 +366,8 @@ static int help(struct sk_buff *skb,
 	typeof(nf_nat_ftp_hook) nf_nat_ftp;
 	/* Until there's been traffic both ways, don't look in packets. */
-	if (ctinfo != IP_CT_ESTABLISHED
-	    && ctinfo != IP_CT_ESTABLISHED+IP_CT_IS_REPLY) {
+	if (ctinfo != IP_CT_ESTABLISHED &&
+	    ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) {
 		pr_debug("ftp: Conntrackinfo = %u\n", ctinfo);
 		return NF_ACCEPT;
 	}
...
@@ -1034,9 +1034,10 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
 			goto out_xmit;
 		packet_increment_head(&po->tx_ring);
 		len_sum += tp_len;
-	} while (likely((ph != NULL) || ((!(msg->msg_flags & MSG_DONTWAIT))
-					&& (atomic_read(&po->tx_ring.pending))))
-		);
+	} while (likely((ph != NULL) ||
+			((!(msg->msg_flags & MSG_DONTWAIT)) &&
+			 (atomic_read(&po->tx_ring.pending))))
+		);
 	err = len_sum;
 	goto out_put;
...
@@ -714,8 +714,8 @@ static int pep_ioctl(struct sock *sk, int cmd, unsigned long arg)
 			return -EINVAL;
 		lock_sock(sk);
-		if (sock_flag(sk, SOCK_URGINLINE)
-		    && !skb_queue_empty(&pn->ctrlreq_queue))
+		if (sock_flag(sk, SOCK_URGINLINE) &&
+		    !skb_queue_empty(&pn->ctrlreq_queue))
 			answ = skb_peek(&pn->ctrlreq_queue)->len;
 		else if (!skb_queue_empty(&sk->sk_receive_queue))
 			answ = skb_peek(&sk->sk_receive_queue)->len;
...
@@ -98,8 +98,8 @@ struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn)
 			if (pn->resource != res)
 				continue;
 		}
-		if (pn_addr(pn->sobject)
-		    && pn_addr(pn->sobject) != pn_addr(obj))
+		if (pn_addr(pn->sobject) &&
+		    pn_addr(pn->sobject) != pn_addr(obj))
 			continue;
 		rval = sknode;
...
@@ -174,8 +174,8 @@ static unsigned int rds_poll(struct file *file, struct socket *sock,
 			mask |= (POLLIN | POLLRDNORM);
 		spin_unlock(&rs->rs_lock);
 	}
-	if (!list_empty(&rs->rs_recv_queue)
-	    || !list_empty(&rs->rs_notify_queue))
+	if (!list_empty(&rs->rs_recv_queue) ||
+	    !list_empty(&rs->rs_notify_queue))
 		mask |= (POLLIN | POLLRDNORM);
 	if (rs->rs_snd_bytes < rds_sk_sndbuf(rs))
 		mask |= (POLLOUT | POLLWRNORM);
@@ -308,8 +308,8 @@ static int rds_getsockopt(struct socket *sock, int level, int optname,
 		if (len < sizeof(int))
 			ret = -EINVAL;
 		else
-		if (put_user(rs->rs_recverr, (int __user *) optval)
-		    || put_user(sizeof(int), optlen))
+		if (put_user(rs->rs_recverr, (int __user *) optval) ||
+		    put_user(sizeof(int), optlen))
 			ret = -EFAULT;
 		else
 			ret = 0;
...
@@ -133,10 +133,8 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
 	spin_lock_irqsave(&rds_conn_lock, flags);
 	conn = rds_conn_lookup(head, laddr, faddr, trans);
-	if (conn
-	    && conn->c_loopback
-	    && conn->c_trans != &rds_loop_transport
-	    && !is_outgoing) {
+	if (conn && conn->c_loopback && conn->c_trans != &rds_loop_transport &&
+	    !is_outgoing) {
 		/* This is a looped back IB connection, and we're
 		 * called by the code handling the incoming connect.
 		 * We need a second connection object into which we
...
@@ -377,8 +377,8 @@ static u32 rds_ib_protocol_compatible(struct rdma_cm_event *event)
 	}
 	/* Even if len is crap *now* I still want to check it. -ASG */
-	if (event->param.conn.private_data_len < sizeof (*dp)
-	    || dp->dp_protocol_major == 0)
+	if (event->param.conn.private_data_len < sizeof (*dp) ||
+	    dp->dp_protocol_major == 0)
 		return RDS_PROTOCOL_3_0;
 	common = be16_to_cpu(dp->dp_protocol_minor_mask) & RDS_IB_SUPPORTED_PROTOCOLS;
...
@@ -570,8 +570,8 @@ void rds_ib_free_mr(void *trans_private, int invalidate)
 	spin_unlock_irqrestore(&pool->list_lock, flags);
 	/* If we've pinned too many pages, request a flush */
-	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned
-	    || atomic_read(&pool->dirty_count) >= pool->max_items / 10)
+	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
+	    atomic_read(&pool->dirty_count) >= pool->max_items / 10)
 		queue_work(rds_wq, &pool->flush_worker);
 	if (invalidate) {
...
@@ -230,8 +230,8 @@ int rds_ib_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
 	int ret = 0;
 	u32 pos;
-	while ((prefill || rds_conn_up(conn))
-	       && rds_ib_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
+	while ((prefill || rds_conn_up(conn)) &&
+	       rds_ib_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
 		if (pos >= ic->i_recv_ring.w_nr) {
 			printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n",
 			       pos);
@@ -771,10 +771,10 @@ static void rds_ib_process_recv(struct rds_connection *conn,
 		hdr = &ibinc->ii_inc.i_hdr;
 		/* We can't just use memcmp here; fragments of a
 		 * single message may carry different ACKs */
-		if (hdr->h_sequence != ihdr->h_sequence
-		    || hdr->h_len != ihdr->h_len
-		    || hdr->h_sport != ihdr->h_sport
-		    || hdr->h_dport != ihdr->h_dport) {
+		if (hdr->h_sequence != ihdr->h_sequence ||
+		    hdr->h_len != ihdr->h_len ||
+		    hdr->h_sport != ihdr->h_sport ||
+		    hdr->h_dport != ihdr->h_dport) {
 			rds_ib_conn_error(conn,
 				"fragment header mismatch; forcing reconnect\n");
 			return;
...
@@ -252,8 +252,8 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
 		rds_ib_ring_free(&ic->i_send_ring, completed);
-		if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags)
-		    || test_bit(0, &conn->c_map_queued))
+		if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
+		    test_bit(0, &conn->c_map_queued))
 			queue_delayed_work(rds_wq, &conn->c_send_w, 0);
 		/* We expect errors as the qp is drained during shutdown */
...
@@ -573,8 +573,8 @@ void rds_iw_free_mr(void *trans_private, int invalidate)
 		rds_iw_free_fastreg(pool, ibmr);
 	/* If we've pinned too many pages, request a flush */
-	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned
-	    || atomic_read(&pool->dirty_count) >= pool->max_items / 10)
+	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
+	    atomic_read(&pool->dirty_count) >= pool->max_items / 10)
 		queue_work(rds_wq, &pool->flush_worker);
 	if (invalidate) {
...
@@ -230,8 +230,8 @@ int rds_iw_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
 	int ret = 0;
 	u32 pos;
-	while ((prefill || rds_conn_up(conn))
-	       && rds_iw_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
+	while ((prefill || rds_conn_up(conn)) &&
+	       rds_iw_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
 		if (pos >= ic->i_recv_ring.w_nr) {
 			printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n",
 			       pos);
@@ -730,10 +730,10 @@ static void rds_iw_process_recv(struct rds_connection *conn,
 		hdr = &iwinc->ii_inc.i_hdr;
 		/* We can't just use memcmp here; fragments of a
 		 * single message may carry different ACKs */
-		if (hdr->h_sequence != ihdr->h_sequence
-		    || hdr->h_len != ihdr->h_len
-		    || hdr->h_sport != ihdr->h_sport
-		    || hdr->h_dport != ihdr->h_dport) {
+		if (hdr->h_sequence != ihdr->h_sequence ||
+		    hdr->h_len != ihdr->h_len ||
+		    hdr->h_sport != ihdr->h_sport ||
+		    hdr->h_dport != ihdr->h_dport) {
 			rds_iw_conn_error(conn,
 				"fragment header mismatch; forcing reconnect\n");
 			return;
...
@@ -288,8 +288,8 @@ void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context)
 		rds_iw_ring_free(&ic->i_send_ring, completed);
-		if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags)
-		    || test_bit(0, &conn->c_map_queued))
+		if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
+		    test_bit(0, &conn->c_map_queued))
 			queue_delayed_work(rds_wq, &conn->c_send_w, 0);
 		/* We expect errors as the qp is drained during shutdown */
@@ -519,8 +519,7 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
 	BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header));
 	/* Fastreg support */
-	if (rds_rdma_cookie_key(rm->m_rdma_cookie)
-	    && !ic->i_fastreg_posted) {
+	if (rds_rdma_cookie_key(rm->m_rdma_cookie) && !ic->i_fastreg_posted) {
 		ret = -EAGAIN;
 		goto out;
 	}
...
@@ -122,8 +122,7 @@ int rds_message_add_extension(struct rds_header *hdr,
 	if (hdr->h_exthdr[0] != RDS_EXTHDR_NONE)
 		return 0;
-	if (type >= __RDS_EXTHDR_MAX
-	    || len != rds_exthdr_size[type])
+	if (type >= __RDS_EXTHDR_MAX || len != rds_exthdr_size[type])
 		return 0;
 	if (ext_len >= RDS_HEADER_EXT_SPACE)
...
@@ -631,8 +631,8 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
 {
 	struct rds_rdma_op *op;
-	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args))
-	    || rm->m_rdma_op != NULL)
+	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args)) ||
+	    rm->m_rdma_op != NULL)
 		return -EINVAL;
 	op = rds_rdma_prepare(rs, CMSG_DATA(cmsg));
@@ -655,8 +655,8 @@ int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
 	u32 r_key;
 	int err = 0;
-	if (cmsg->cmsg_len < CMSG_LEN(sizeof(rds_rdma_cookie_t))
-	    || rm->m_rdma_cookie != 0)
+	if (cmsg->cmsg_len < CMSG_LEN(sizeof(rds_rdma_cookie_t)) ||
+	    rm->m_rdma_cookie != 0)
 		return -EINVAL;
 	memcpy(&rm->m_rdma_cookie, CMSG_DATA(cmsg), sizeof(rm->m_rdma_cookie));
@@ -692,8 +692,8 @@ int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
 int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
 		      struct cmsghdr *cmsg)
 {
-	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_get_mr_args))
-	    || rm->m_rdma_cookie != 0)
+	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_get_mr_args)) ||
+	    rm->m_rdma_cookie != 0)
 		return -EINVAL;
 	return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->m_rdma_mr);
...
@@ -195,8 +195,8 @@ void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
 	 * XXX we could spend more on the wire to get more robust failure
 	 * detection, arguably worth it to avoid data corruption.
 	 */
-	if (be64_to_cpu(inc->i_hdr.h_sequence) < conn->c_next_rx_seq
-	    && (inc->i_hdr.h_flags & RDS_FLAG_RETRANSMITTED)) {
+	if (be64_to_cpu(inc->i_hdr.h_sequence) < conn->c_next_rx_seq &&
+	    (inc->i_hdr.h_flags & RDS_FLAG_RETRANSMITTED)) {
 		rds_stats_inc(s_recv_drop_old_seq);
 		goto out;
 	}
@@ -432,10 +432,9 @@ int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
 		}
 		timeo = wait_event_interruptible_timeout(*sk->sk_sleep,
-					(!list_empty(&rs->rs_notify_queue)
-					|| rs->rs_cong_notify
-					|| rds_next_incoming(rs, &inc)),
-					timeo);
+					(!list_empty(&rs->rs_notify_queue) ||
+					 rs->rs_cong_notify ||
+					 rds_next_incoming(rs, &inc)), timeo);
 		rdsdebug("recvmsg woke inc %p timeo %ld\n", inc,
 			 timeo);
 		if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
...
@@ -235,8 +235,8 @@ int rds_send_xmit(struct rds_connection *conn)
 		 * connection.
 		 * Therefore, we never retransmit messages with RDMA ops.
 		 */
-		if (rm->m_rdma_op
-		    && test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
+		if (rm->m_rdma_op &&
+		    test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
 			spin_lock_irqsave(&conn->c_lock, flags);
 			if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
 				list_move(&rm->m_conn_item, &to_be_dropped);
@@ -247,8 +247,8 @@ int rds_send_xmit(struct rds_connection *conn)
 		/* Require an ACK every once in a while */
 		len = ntohl(rm->m_inc.i_hdr.h_len);
-		if (conn->c_unacked_packets == 0
-		    || conn->c_unacked_bytes < len) {
+		if (conn->c_unacked_packets == 0 ||
+		    conn->c_unacked_bytes < len) {
 			__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
 			conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
@@ -418,8 +418,8 @@ void rds_rdma_send_complete(struct rds_message *rm, int status)
 	spin_lock(&rm->m_rs_lock);
 	ro = rm->m_rdma_op;
-	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)
-	    && ro && ro->r_notify && ro->r_notifier) {
+	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
+	    ro && ro->r_notify && ro->r_notifier) {
 		notifier = ro->r_notifier;
 		rs = rm->m_rs;
 		sock_hold(rds_rs_to_sk(rs));
@@ -549,8 +549,7 @@ void rds_send_remove_from_sock(struct list_head *messages, int status)
 		list_del_init(&rm->m_sock_item);
 		rds_send_sndbuf_remove(rs, rm);
-		if (ro && ro->r_notifier
-		    && (status || ro->r_notify)) {
+		if (ro && ro->r_notifier && (status || ro->r_notify)) {
 			notifier = ro->r_notifier;
 			list_add_tail(&notifier->n_list,
 				      &rs->rs_notify_queue);
@@ -877,8 +876,8 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
 	if (ret)
 		goto out;
-	if ((rm->m_rdma_cookie || rm->m_rdma_op)
-	    && conn->c_trans->xmit_rdma == NULL) {
+	if ((rm->m_rdma_cookie || rm->m_rdma_op) &&
+	    conn->c_trans->xmit_rdma == NULL) {
 		if (printk_ratelimit())
 			printk(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
 			       rm->m_rdma_op, conn->c_trans->xmit_rdma);
@@ -890,8 +889,8 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
 	 * have scheduled a delayed reconnect however - in this case
 	 * we should not interfere.
 	 */
-	if (rds_conn_state(conn) == RDS_CONN_DOWN
-	    && !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
+	if (rds_conn_state(conn) == RDS_CONN_DOWN &&
+	    !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
 		queue_delayed_work(rds_wq, &conn->c_conn_w, 0);
 	ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
@@ -973,8 +972,8 @@ rds_send_pong(struct rds_connection *conn, __be16 dport)
 	 * have scheduled a delayed reconnect however - in this case
 	 * we should not interfere.
 	 */
-	if (rds_conn_state(conn) == RDS_CONN_DOWN