Commit 39b566ee authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (30 commits)
  RDMA/cxgb3: Enforce required firmware
  IB/mlx4: Unregister IB device prior to CLOSE PORT command
  mlx4_core: Add link type autosensing
  mlx4_core: Don't perform SET_PORT command for Ethernet ports
  RDMA/nes: Handle MPA Reject message properly
  RDMA/nes: Improve use of PBLs
  RDMA/nes: Remove LLTX
  RDMA/nes: Inform hardware that asynchronous event has been handled
  RDMA/nes: Fix tmp_addr compilation warning
  RDMA/nes: Report correct vendor_id and vendor_part_id
  RDMA/nes: Update copyright to new legal entity and year
  RDMA/nes: Account for freed PBL after HW operation
  IB: Remove useless ibdev_is_alive() tests from sysfs code
  IB/sa_query: Fix AH leak due to update_sm_ah() race
  IB/mad: Fix ib_post_send_mad() returning 0 with no generate send comp
  IB/mad: initialize mad_agent_priv before putting on lists
  IB/mad: Fix null pointer dereference in local_completions()
  IB/mad: Fix RMPP header RRespTime manipulation
  IB/iser: Remove hard setting of path MTU
  mlx4_core: Add device IDs for MT25458 10GigE devices
  ...
parents 39f15003 09f98baf
@@ -927,8 +927,7 @@ int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
 	unsigned long flags;
 	int ret = 0;
 
-	service_mask = service_mask ? service_mask :
-		       __constant_cpu_to_be64(~0ULL);
+	service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
 	service_id &= service_mask;
 	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
 	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
@@ -954,7 +953,7 @@ int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
 	spin_lock_irqsave(&cm.lock, flags);
 	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
 		cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
-		cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
+		cm_id->service_mask = ~cpu_to_be64(0);
 	} else {
 		cm_id->service_id = service_id;
 		cm_id->service_mask = service_mask;
@@ -1134,7 +1133,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
 		goto error1;
 	}
 	cm_id->service_id = param->service_id;
-	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
+	cm_id->service_mask = ~cpu_to_be64(0);
 	cm_id_priv->timeout_ms = cm_convert_to_ms(
 				    param->primary_path->packet_life_time) * 2 +
 				 cm_convert_to_ms(
@@ -1545,7 +1544,7 @@ static int cm_req_handler(struct cm_work *work)
 	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
 	cm_id_priv->id.context = listen_cm_id_priv->id.context;
 	cm_id_priv->id.service_id = req_msg->service_id;
-	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);
+	cm_id_priv->id.service_mask = ~cpu_to_be64(0);
 
 	cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
 	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
@@ -2898,7 +2897,7 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
 		goto out;
 
 	cm_id->service_id = param->service_id;
-	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
+	cm_id->service_mask = ~cpu_to_be64(0);
 	cm_id_priv->timeout_ms = param->timeout_ms;
 	cm_id_priv->max_cm_retries = param->max_cm_retries;
 	ret = cm_alloc_msg(cm_id_priv, &msg);
@@ -2992,7 +2991,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
 	cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
 	cm_id_priv->id.context = cur_cm_id_priv->id.context;
 	cm_id_priv->id.service_id = sidr_req_msg->service_id;
-	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);
+	cm_id_priv->id.service_mask = ~cpu_to_be64(0);
 
 	cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
 	cm_process_work(cm_id_priv, work);
@@ -3789,7 +3788,7 @@ static int __init ib_cm_init(void)
 	rwlock_init(&cm.device_lock);
 	spin_lock_init(&cm.lock);
 	cm.listen_service_table = RB_ROOT;
-	cm.listen_service_id = __constant_be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
+	cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
 	cm.remote_id_table = RB_ROOT;
 	cm.remote_qp_table = RB_ROOT;
 	cm.remote_sidr_table = RB_ROOT;
......
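The `~cpu_to_be64(0)` idiom used throughout the ib_cm hunks above works because an all-ones bit pattern reads the same in either byte order: inverting a byte-swapped zero gives exactly `cpu_to_be64(~0ULL)`, and the expression still folds to a compile-time constant. A standalone userspace demo of the identity (glibc's `htobe64()` standing in for the kernel's `cpu_to_be64()`; not part of the patch):

	#include <assert.h>
	#include <endian.h>	/* htobe64() on glibc */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t a = ~htobe64(0);	/* swap zero, then invert */
		uint64_t b = htobe64(~0ULL);	/* invert, then swap	  */

		assert(a == b && a == UINT64_MAX);
		printf("all-ones mask: %#llx\n", (unsigned long long)a);
		return 0;
	}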
@@ -44,17 +44,17 @@
 
 #define IB_CM_CLASS_VERSION	2 /* IB specification 1.2 */
 
-#define CM_REQ_ATTR_ID		__constant_htons(0x0010)
-#define CM_MRA_ATTR_ID		__constant_htons(0x0011)
-#define CM_REJ_ATTR_ID		__constant_htons(0x0012)
-#define CM_REP_ATTR_ID		__constant_htons(0x0013)
-#define CM_RTU_ATTR_ID		__constant_htons(0x0014)
-#define CM_DREQ_ATTR_ID		__constant_htons(0x0015)
-#define CM_DREP_ATTR_ID		__constant_htons(0x0016)
-#define CM_SIDR_REQ_ATTR_ID	__constant_htons(0x0017)
-#define CM_SIDR_REP_ATTR_ID	__constant_htons(0x0018)
-#define CM_LAP_ATTR_ID		__constant_htons(0x0019)
-#define CM_APR_ATTR_ID		__constant_htons(0x001A)
+#define CM_REQ_ATTR_ID		cpu_to_be16(0x0010)
+#define CM_MRA_ATTR_ID		cpu_to_be16(0x0011)
+#define CM_REJ_ATTR_ID		cpu_to_be16(0x0012)
+#define CM_REP_ATTR_ID		cpu_to_be16(0x0013)
+#define CM_RTU_ATTR_ID		cpu_to_be16(0x0014)
+#define CM_DREQ_ATTR_ID		cpu_to_be16(0x0015)
+#define CM_DREP_ATTR_ID		cpu_to_be16(0x0016)
+#define CM_SIDR_REQ_ATTR_ID	cpu_to_be16(0x0017)
+#define CM_SIDR_REP_ATTR_ID	cpu_to_be16(0x0018)
+#define CM_LAP_ATTR_ID		cpu_to_be16(0x0019)
+#define CM_APR_ATTR_ID		cpu_to_be16(0x001A)
 
 enum cm_msg_sequence {
	CM_MSG_SEQUENCE_REQ,
......
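The attribute-ID conversion above is possible because, as of this kernel generation, `cpu_to_be16()` and friends fold to compile-time constants when given constant arguments, so the separate `__constant_htons()` spellings are redundant even inside `#define`s and static initializers. A small userspace analogue of keeping a protocol constant in wire byte order (glibc `htobe16()` playing the role of `cpu_to_be16()`, value mirroring CM_REQ_ATTR_ID):

	#include <endian.h>
	#include <stdint.h>
	#include <stdio.h>

	/* stored swapped once, compared with no per-packet swaps */
	#define CM_REQ_ATTR_ID ((uint16_t)htobe16(0x0010))

	int main(void)
	{
		uint16_t wire_attr = htobe16(0x0010);	/* as read off the wire */

		if (wire_attr == CM_REQ_ATTR_ID)
			puts("REQ attribute");
		return 0;
	}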
@@ -193,7 +193,7 @@ void ib_dealloc_device(struct ib_device *device)
 
 	BUG_ON(device->reg_state != IB_DEV_UNREGISTERED);
 
-	ib_device_unregister_sysfs(device);
+	kobject_put(&device->dev.kobj);
 }
 EXPORT_SYMBOL(ib_dealloc_device);
@@ -348,6 +348,8 @@ void ib_unregister_device(struct ib_device *device)
 
 	mutex_unlock(&device_mutex);
 
+	ib_device_unregister_sysfs(device);
+
 	spin_lock_irqsave(&device->client_data_lock, flags);
 	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
 		kfree(context);
......
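Taken together with the sysfs.c hunk further down, the device.c change splits teardown in two: ib_unregister_device() now removes the sysfs attributes (while taking an extra reference on the device kobject), and ib_dealloc_device() merely drops that last reference. Sketch of the resulting call ordering (not a compilable unit):

	ib_unregister_device(device)
		ib_device_unregister_sysfs(device)	/* removes attributes...     */
			kobject_get(&device->dev.kobj)	/* ...but pins the kobject   */
	...
	ib_dealloc_device(device)
		kobject_put(&device->dev.kobj)		/* last put frees the memory */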
@@ -301,6 +301,16 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 	mad_agent_priv->agent.context = context;
 	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
 	mad_agent_priv->agent.port_num = port_num;
+	spin_lock_init(&mad_agent_priv->lock);
+	INIT_LIST_HEAD(&mad_agent_priv->send_list);
+	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
+	INIT_LIST_HEAD(&mad_agent_priv->done_list);
+	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
+	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
+	INIT_LIST_HEAD(&mad_agent_priv->local_list);
+	INIT_WORK(&mad_agent_priv->local_work, local_completions);
+	atomic_set(&mad_agent_priv->refcount, 1);
+	init_completion(&mad_agent_priv->comp);
 
 	spin_lock_irqsave(&port_priv->reg_lock, flags);
 	mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;
@@ -350,17 +360,6 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 	list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
 	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
 
-	spin_lock_init(&mad_agent_priv->lock);
-	INIT_LIST_HEAD(&mad_agent_priv->send_list);
-	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
-	INIT_LIST_HEAD(&mad_agent_priv->done_list);
-	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
-	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
-	INIT_LIST_HEAD(&mad_agent_priv->local_list);
-	INIT_WORK(&mad_agent_priv->local_work, local_completions);
-	atomic_set(&mad_agent_priv->refcount, 1);
-	init_completion(&mad_agent_priv->comp);
-
 	return &mad_agent_priv->agent;
 
 error4:
@@ -743,9 +742,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 		break;
 	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
 		kmem_cache_free(ib_mad_cache, mad_priv);
-		kfree(local);
-		ret = 1;
-		goto out;
+		break;
 	case IB_MAD_RESULT_SUCCESS:
 		/* Treat like an incoming receive MAD */
 		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
@@ -756,10 +753,12 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 					    &mad_priv->mad.mad);
 		}
 		if (!port_priv || !recv_mad_agent) {
+			/*
+			 * No receiving agent so drop packet and
+			 * generate send completion.
+			 */
 			kmem_cache_free(ib_mad_cache, mad_priv);
-			kfree(local);
-			ret = 0;
-			goto out;
+			break;
 		}
 		local->mad_priv = mad_priv;
 		local->recv_mad_agent = recv_mad_agent;
@@ -2356,7 +2355,7 @@ static void local_completions(struct work_struct *work)
 	struct ib_mad_local_private *local;
 	struct ib_mad_agent_private *recv_mad_agent;
 	unsigned long flags;
-	int recv = 0;
+	int free_mad;
 	struct ib_wc wc;
 	struct ib_mad_send_wc mad_send_wc;
@@ -2370,14 +2369,15 @@ static void local_completions(struct work_struct *work)
 				   completion_list);
 		list_del(&local->completion_list);
 		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
+		free_mad = 0;
 		if (local->mad_priv) {
 			recv_mad_agent = local->recv_mad_agent;
 			if (!recv_mad_agent) {
 				printk(KERN_ERR PFX "No receive MAD agent for local completion\n");
+				free_mad = 1;
 				goto local_send_completion;
 			}
-			recv = 1;
 			/*
 			 * Defined behavior is to complete response
 			 * before request
@@ -2422,7 +2422,7 @@ static void local_completions(struct work_struct *work)
 		spin_lock_irqsave(&mad_agent_priv->lock, flags);
 		atomic_dec(&mad_agent_priv->refcount);
-		if (!recv)
+		if (free_mad)
 			kmem_cache_free(ib_mad_cache, local->mad_priv);
 		kfree(local);
 	}
......
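The ib_register_mad_agent() reordering above closes a publish-before-init window: once list_add_tail() puts the agent on port_priv->agent_list under reg_lock, concurrent MAD processing can find it, so every list head, lock, and refcount must already be valid. The general shape (identifiers taken from the hunks above; not a standalone compilable unit):

	/* 1. make the object fully valid... */
	spin_lock_init(&mad_agent_priv->lock);
	INIT_LIST_HEAD(&mad_agent_priv->send_list);
	atomic_set(&mad_agent_priv->refcount, 1);

	/* 2. ...and only then publish it where others can look it up */
	spin_lock_irqsave(&port_priv->reg_lock, flags);
	list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);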
@@ -735,7 +735,7 @@ process_rmpp_data(struct ib_mad_agent_private *agent,
 		goto bad;
 	}
 
-	if (rmpp_hdr->seg_num == __constant_htonl(1)) {
+	if (rmpp_hdr->seg_num == cpu_to_be32(1)) {
 		if (!(ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST)) {
 			rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
 			goto bad;
......
@@ -395,6 +395,8 @@ static void update_sm_ah(struct work_struct *work)
 	}
 
 	spin_lock_irq(&port->ah_lock);
+	if (port->sm_ah)
+		kref_put(&port->sm_ah->ref, free_sm_ah);
 	port->sm_ah = new_ah;
 	spin_unlock_irq(&port->ah_lock);
......
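The update_sm_ah() hunk fixes an address-handle leak: each refresh now drops the reference on the AH it replaces, and does so under ah_lock so a reader never sees a half-swapped pointer; free_sm_ah() runs only when kref_put() releases the final reference. The replace-and-release pattern in isolation (names from the hunk; sketch only):

	spin_lock_irq(&port->ah_lock);
	if (port->sm_ah)
		kref_put(&port->sm_ah->ref, free_sm_ah);	/* release the old AH  */
	port->sm_ah = new_ah;					/* install the new one */
	spin_unlock_irq(&port->ah_lock);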
@@ -66,11 +66,6 @@ struct port_table_attribute {
 	int			index;
 };
 
-static inline int ibdev_is_alive(const struct ib_device *dev)
-{
-	return dev->reg_state == IB_DEV_REGISTERED;
-}
-
 static ssize_t port_attr_show(struct kobject *kobj,
 			      struct attribute *attr, char *buf)
 {
@@ -80,8 +75,6 @@ static ssize_t port_attr_show(struct kobject *kobj,
 
 	if (!port_attr->show)
 		return -EIO;
-	if (!ibdev_is_alive(p->ibdev))
-		return -ENODEV;
 
 	return port_attr->show(p, port_attr, buf);
 }
@@ -562,9 +555,6 @@ static ssize_t show_node_type(struct device *device,
 {
 	struct ib_device *dev = container_of(device, struct ib_device, dev);
 
-	if (!ibdev_is_alive(dev))
-		return -ENODEV;
-
 	switch (dev->node_type) {
 	case RDMA_NODE_IB_CA:	  return sprintf(buf, "%d: CA\n", dev->node_type);
 	case RDMA_NODE_RNIC:	  return sprintf(buf, "%d: RNIC\n", dev->node_type);
@@ -581,9 +571,6 @@ static ssize_t show_sys_image_guid(struct device *device,
 	struct ib_device_attr attr;
 	ssize_t ret;
 
-	if (!ibdev_is_alive(dev))
-		return -ENODEV;
-
 	ret = ib_query_device(dev, &attr);
 	if (ret)
 		return ret;
@@ -600,9 +587,6 @@ static ssize_t show_node_guid(struct device *device,
 {
 	struct ib_device *dev = container_of(device, struct ib_device, dev);
 
-	if (!ibdev_is_alive(dev))
-		return -ENODEV;
-
 	return sprintf(buf, "%04x:%04x:%04x:%04x\n",
 		       be16_to_cpu(((__be16 *) &dev->node_guid)[0]),
 		       be16_to_cpu(((__be16 *) &dev->node_guid)[1]),
@@ -848,6 +832,9 @@ void ib_device_unregister_sysfs(struct ib_device *device)
 	struct kobject *p, *t;
 	struct ib_port *port;
 
+	/* Hold kobject until ib_dealloc_device() */
+	kobject_get(&device->dev.kobj);
+
 	list_for_each_entry_safe(p, t, &device->port_list, entry) {
 		list_del(&p->entry);
 		port = container_of(p, struct ib_port, kobj);
......
@@ -450,7 +450,7 @@ static int cqe_completes_wr(struct t3_cqe *cqe, struct t3_wq *wq)
 	if ((CQE_OPCODE(*cqe) == T3_READ_RESP) && SQ_TYPE(*cqe))
 		return 0;
 
-	if ((CQE_OPCODE(*cqe) == T3_SEND) && RQ_TYPE(*cqe) &&
+	if (CQE_SEND_OPCODE(*cqe) && RQ_TYPE(*cqe) &&
 	    Q_EMPTY(wq->rq_rptr, wq->rq_wptr))
 		return 0;
@@ -938,6 +938,23 @@ int cxio_rdev_open(struct cxio_rdev *rdev_p)
 	if (!rdev_p->t3cdev_p)
 		rdev_p->t3cdev_p = dev2t3cdev(netdev_p);
 	rdev_p->t3cdev_p->ulp = (void *) rdev_p;
+	err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, GET_EMBEDDED_INFO,
+				    &(rdev_p->fw_info));
+	if (err) {
+		printk(KERN_ERR "%s t3cdev_p(%p)->ctl returned error %d.\n",
+		       __func__, rdev_p->t3cdev_p, err);
+		goto err1;
+	}
+	if (G_FW_VERSION_MAJOR(rdev_p->fw_info.fw_vers) != CXIO_FW_MAJ) {
+		printk(KERN_ERR MOD "fatal firmware version mismatch: "
+		       "need version %u but adapter has version %u\n",
+		       CXIO_FW_MAJ,
+		       G_FW_VERSION_MAJOR(rdev_p->fw_info.fw_vers));
+		err = -EINVAL;
+		goto err1;
+	}
+
 	err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_GET_PARAMS,
 				    &(rdev_p->rnic_info));
 	if (err) {
@@ -1204,11 +1221,12 @@ int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
 	}
 
 	/* incoming SEND with no receive posted failures */
-	if ((CQE_OPCODE(*hw_cqe) == T3_SEND) && RQ_TYPE(*hw_cqe) &&
+	if (CQE_SEND_OPCODE(*hw_cqe) && RQ_TYPE(*hw_cqe) &&
 	    Q_EMPTY(wq->rq_rptr, wq->rq_wptr)) {
 		ret = -1;
 		goto skip_cqe;
 	}
+	BUG_ON((*cqe_flushed == 0) && !SW_CQE(*hw_cqe));
 	goto proc_cqe;
 }
@@ -1223,6 +1241,13 @@ int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
 	 * then we complete this with TPT_ERR_MSN and mark the wq in
 	 * error.
 	 */
+
+	if (Q_EMPTY(wq->rq_rptr, wq->rq_wptr)) {
+		wq->error = 1;
+		ret = -1;
+		goto skip_cqe;
+	}
+
 	if (unlikely((CQE_WRID_MSN(*hw_cqe) != (wq->rq_rptr + 1)))) {
 		wq->error = 1;
 		hw_cqe->header |= htonl(V_CQE_STATUS(TPT_ERR_MSN));
@@ -1277,6 +1302,7 @@ int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
 		cxio_hal_pblpool_free(wq->rdev,
 			wq->rq[Q_PTR2IDX(wq->rq_rptr,
 			wq->rq_size_log2)].pbl_addr, T3_STAG0_PBL_SIZE);
+		BUG_ON(Q_EMPTY(wq->rq_rptr, wq->rq_wptr));
 		wq->rq_rptr++;
 	}
......
@@ -61,6 +61,8 @@
 
 #define T3_MAX_DEV_NAME_LEN 32
 
+#define CXIO_FW_MAJ 7
+
 struct cxio_hal_ctrl_qp {
 	u32 wptr;
 	u32 rptr;
@@ -108,6 +110,7 @@ struct cxio_rdev {
 	struct gen_pool *pbl_pool;
 	struct gen_pool *rqt_pool;
 	struct list_head entry;
+	struct ch_embedded_info fw_info;
 };
 
 static inline int cxio_num_stags(struct cxio_rdev *rdev_p)
......
@@ -604,6 +604,12 @@ struct t3_cqe {
 #define CQE_STATUS(x)	  (G_CQE_STATUS(be32_to_cpu((x).header)))
 #define CQE_OPCODE(x)	  (G_CQE_OPCODE(be32_to_cpu((x).header)))
 
+#define CQE_SEND_OPCODE(x)( \
+	(G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND) || \
+	(G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_SE) || \
+	(G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_INV) || \
+	(G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_SE_INV))
+
 #define CQE_LEN(x)	  (be32_to_cpu((x).len))
 
 /* used for RQ completion processing */
......
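CQE_SEND_OPCODE() lets the poll path treat all four T3 send variants (plain, solicited-event, invalidate, and both combined) uniformly when deciding how to complete against an empty receive queue. A standalone sketch of the predicate's shape (enum values are illustrative, not the real T3 encoding):

	#include <stdbool.h>
	#include <stdio.h>

	enum t3_opcode {
		T3_SEND, T3_SEND_WITH_SE, T3_SEND_WITH_INV,
		T3_SEND_WITH_SE_INV, T3_READ_REQ
	};

	static bool is_send_opcode(enum t3_opcode op)
	{
		return op == T3_SEND || op == T3_SEND_WITH_SE ||
		       op == T3_SEND_WITH_INV || op == T3_SEND_WITH_SE_INV;
	}

	int main(void)
	{
		printf("%d %d\n", is_send_opcode(T3_SEND_WITH_INV),
		       is_send_opcode(T3_READ_REQ));
		return 0;
	}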
@@ -1678,6 +1678,9 @@ static int terminate(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 {
 	struct iwch_ep *ep = ctx;
 
+	if (state_read(&ep->com) != FPDU_MODE)
+		return CPL_RET_BUF_DONE;
+
 	PDBG("%s ep %p\n", __func__, ep);
 	skb_pull(skb, sizeof(struct cpl_rdma_terminate));
 	PDBG("%s saving %d bytes of term msg\n", __func__, skb->len);
......
@@ -179,11 +179,6 @@ void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb)
 	case TPT_ERR_BOUND:
 	case TPT_ERR_INVALIDATE_SHARED_MR:
 	case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
-		printk(KERN_ERR "%s - CQE Err qpid 0x%x opcode %d status 0x%x "
-		       "type %d wrid.hi 0x%x wrid.lo 0x%x \n", __func__,
-		       CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe),
-		       CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe),
-		       CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
 		(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
 		post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_ACCESS_ERR, 1);
 		break;
......
@@ -99,8 +99,8 @@ static int build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr,
 	if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
 		plen = 4;
 		wqe->write.sgl[0].stag = wr->ex.imm_data;
-		wqe->write.sgl[0].len = __constant_cpu_to_be32(0);
-		wqe->write.num_sgle = __constant_cpu_to_be32(0);
+		wqe->write.sgl[0].len = cpu_to_be32(0);
+		wqe->write.num_sgle = cpu_to_be32(0);
 		*flit_cnt = 6;
 	} else {
 		plen = 0;
@@ -195,15 +195,12 @@ static int build_inv_stag(union t3_wr *wqe, struct ib_send_wr *wr,
 	return 0;
 }
 
-/*
- * TBD: this is going to be moved to firmware. Missing pdid/qpid check for now.
- */
 static int iwch_sgl2pbl_map(struct iwch_dev *rhp, struct ib_sge *sg_list,
 			    u32 num_sgle, u32 * pbl_addr, u8 * page_size)
 {
 	int i;
 	struct iwch_mr *mhp;
-	u32 offset;
+	u64 offset;
 	for (i = 0; i < num_sgle; i++) {
 		mhp = get_mhp(rhp, (sg_list[i].lkey) >> 8);
@@ -235,8 +232,8 @@ static int iwch_sgl2pbl_map(struct iwch_dev *rhp, struct ib_sge *sg_list,
 			return -EINVAL;
 		}
 		offset = sg_list[i].addr - mhp->attr.va_fbo;
-		offset += ((u32) mhp->attr.va_fbo) %
-			  (1UL << (12 + mhp->attr.page_size));
+		offset += mhp->attr.va_fbo &
+			  ((1UL << (12 + mhp->attr.page_size)) - 1);
 		pbl_addr[i] = ((mhp->attr.pbl_addr -
 			       rhp->rdev.rnic_info.pbl_base) >> 3) +
 			      (offset >> (12 + mhp->attr.page_size));
@@ -266,8 +263,8 @@ static int build_rdma_recv(struct iwch_qp *qhp, union t3_wr *wqe,
 		wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
 
 		/* to in the WQE == the offset into the page */
-		wqe->recv.sgl[i].to = cpu_to_be64(((u32) wr->sg_list[i].addr) %
-				(1UL << (12 + page_size[i])));
+		wqe->recv.sgl[i].to = cpu_to_be64(((u32)wr->sg_list[i].addr) &
+				((1UL << (12 + page_size[i])) - 1));
 
 		/* pbl_addr is the adapters address in the PBL */
 		wqe->recv.pbl_addr[i] = cpu_to_be32(pbl_addr[i]);
......
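Two things change in the iwch_qp.c offset math above: the in-page offset is computed with a mask instead of a modulo (equivalent for the power-of-two page sizes involved, and cheaper), and `offset` is widened to u64 so a large va_fbo no longer truncates through a u32 cast. A standalone demo of both points (not part of the patch):

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t va_fbo = 0x123456789abcULL;	/* deliberately > 32 bits */
		unsigned int shift = 12;		/* 4 KiB pages */

		/* for power-of-two sizes, x % (1 << s) == x & ((1 << s) - 1) */
		assert(va_fbo % (1ULL << shift) ==
		       (va_fbo & ((1ULL << shift) - 1)));

		/* the old u32 cast would silently drop the high bits */
		printf("full: %#llx, truncated: %#x\n",
		       (unsigned long long)va_fbo, (uint32_t)va_fbo);
		return 0;
	}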
@@ -46,11 +46,11 @@
 #include "ehca_iverbs.h"
 #include "hcp_if.h"
 
-#define IB_MAD_STATUS_REDIRECT		__constant_htons(0x0002)
-#define IB_MAD_STATUS_UNSUP_VERSION	__constant_htons(0x0004)
-#define IB_MAD_STATUS_UNSUP_METHOD	__constant_htons(0x0008)
+#define IB_MAD_STATUS_REDIRECT		cpu_to_be16(0x0002)
+#define IB_MAD_STATUS_UNSUP_VERSION	cpu_to_be16(0x0004)
+#define IB_MAD_STATUS_UNSUP_METHOD	cpu_to_be16(0x0008)
 
-#define IB_PMA_CLASS_PORT_INFO		__constant_htons(0x0001)
+#define IB_PMA_CLASS_PORT_INFO		cpu_to_be16(0x0001)
 
 /**
  * ehca_define_sqp - Defines special queue pair 1 (GSI QP). When special queue
......
@@ -772,8 +772,8 @@ void ipath_get_eeprom_info(struct ipath_devdata *dd)
 			"0x%x, not 0x%x\n", csum, ifp->if_csum);
 		goto done;
 	}
-	if (*(__be64 *) ifp->if_guid == 0ULL ||
-	    *(__be64 *) ifp->if_guid == __constant_cpu_to_be64(-1LL)) {
+	if (*(__be64 *) ifp->if_guid == cpu_to_be64(0) ||
+	    *(__be64 *) ifp->if_guid == ~cpu_to_be64(0)) {
 		ipath_dev_err(dd, "Invalid GUID %llx from flash; "
 			      "ignoring\n",
 			      *(unsigned long long *) ifp->if_guid);
......
@@ -455,7 +455,7 @@ static void init_shadow_tids(struct ipath_devdata *dd)
 	if (!addrs) {
 		ipath_dev_err(dd, "failed to allocate shadow dma handle "
 			      "array, no expected sends!\n");
-		vfree(dd->ipath_pageshadow);
+		vfree(pages);
 		dd->ipath_pageshadow = NULL;
 		return;
 	}
......
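The init_shadow_tids() fix frees the pointer that was actually allocated in this function: when the `addrs` allocation fails, `dd->ipath_pageshadow` has not been assigned yet, so the old `vfree(dd->ipath_pageshadow)` freed a not-yet-set pointer and leaked the just-allocated `pages` array. Error-path shape after the fix (sketch; not a standalone compilable unit):

	pages = vmalloc(...);			/* first allocation */
	...
	addrs = vmalloc(...);
	if (!addrs) {
		vfree(pages);			/* free what this function allocated */
		dd->ipath_pageshadow = NULL;
		return;
	}
	...
	dd->ipath_pageshadow = pages;		/* only assigned on full success */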
@@ -37,10 +37,10 @@
 #include "ipath_verbs.h"
 #include "ipath_common.h"
 
-#define IB_SMP_UNSUP_VERSION	__constant_htons(0x0004)
-#define IB_SMP_UNSUP_METHOD	__constant_htons(0x0008)
-#define IB_SMP_UNSUP_METH_ATTR	__constant_htons(0x000C)
-#define IB_SMP_INVALID_FIELD	__constant_htons(0x001C)
+#define IB_SMP_UNSUP_VERSION	cpu_to_be16(0x0004)
+#define IB_SMP_UNSUP_METHOD	cpu_to_be16(0x0008)
+#define IB_SMP_UNSUP_METH_ATTR	cpu_to_be16(0x000C)
+#define IB_SMP_INVALID_FIELD	cpu_to_be16(0x001C)
 
 static int reply(struct ib_smp *smp)
 {
@@ -789,12 +789,12 @@ static int recv_subn_set_pkeytable(struct ib_smp *smp,
 	return recv_subn_get_pkeytable(smp, ibdev);
 }
 
-#define IB_PMA_CLASS_PORT_INFO		__constant_htons(0x0001)
-#define IB_PMA_PORT_SAMPLES_CONTROL	__constant_htons(0x0010)
-#define IB_PMA_PORT_SAMPLES_RESULT	__constant_htons(0x0011)
-#define IB_PMA_PORT_COUNTERS		__constant_htons(0x0012)
-#define IB_PMA_PORT_COUNTERS_EXT	__constant_htons(0x001D)
-#define IB_PMA_PORT_SAMPLES_RESULT_EXT	__constant_htons(0x001E)
+#define IB_PMA_CLASS_PORT_INFO		cpu_to_be16(0x0001)
+#define IB_PMA_PORT_SAMPLES_CONTROL	cpu_to_be16(0x0010)
+#define IB_PMA_PORT_SAMPLES_RESULT	cpu_to_be16(0x0011)
+#define IB_PMA_PORT_COUNTERS		cpu_to_be16(0x0012)
+#define IB_PMA_PORT_COUNTERS_EXT	cpu_to_be16(0x001D)
+#define IB_PMA_PORT_SAMPLES_RESULT_EXT	cpu_to_be16(0x001E)
 
 struct ib_perf {
 	u8 base_version;
@@ -884,19 +884,19 @@ struct ib_pma_portcounters {
 	__be32 port_rcv_packets;
 } __attribute__ ((packed));
 
-#define IB_PMA_SEL_SYMBOL_ERROR			__constant_htons(0x0001)
-#define IB_PMA_SEL_LINK_ERROR_RECOVERY		__constant_htons(0x0002)
-#define IB_PMA_SEL_LINK_DOWNED			__constant_htons(0x0004)
-#define IB_PMA_SEL_PORT_RCV_ERRORS		__constant_htons(0x0008)
-#define IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS	__constant_htons(0x0010)
-#define IB_PMA_SEL_PORT_XMIT_DISCARDS		__constant_htons(0x0040)
-#define IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS	__constant_htons(0x0200)
-#define IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS	__constant_htons(0x0400)
-#define IB_PMA_SEL_PORT_VL15_DROPPED		__constant_htons(0x0800)
-#define IB_PMA_SEL_PORT_XMIT_DATA		__constant_htons(0x1000)
-#define IB_PMA_SEL_PORT_RCV_DATA		__constant_htons(0x2000)
-#define IB_PMA_SEL_PORT_XMIT_PACKETS		__constant_htons(0x4000)
-#define IB_PMA_SEL_PORT_RCV_PACKETS		__constant_htons(0x8000)
+#define IB_PMA_SEL_SYMBOL_ERROR			cpu_to_be16(0x0001)
+#define IB_PMA_SEL_LINK_ERROR_RECOVERY		cpu_to_be16(0x0002)
+#define IB_PMA_SEL_LINK_DOWNED			cpu_to_be16(0x0004)
+#define IB_PMA_SEL_PORT_RCV_ERRORS		cpu_to_be16(0x0008)
+#define IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS	cpu_to_be16(0x0010)
+#define IB_PMA_SEL_PORT_XMIT_DISCARDS		cpu_to_be16(0x0040)
+#define IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS	cpu_to_be16(0x0200)
+#define IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS	cpu_to_be16(0x0400)
+#define IB_PMA_SEL_PORT_VL15_DROPPED		cpu_to_be16(0x0800)
+#define IB_PMA_SEL_PORT_XMIT_DATA		cpu_to_be16(0x1000)
+#define IB_PMA_SEL_PORT_RCV_DATA		cpu_to_be16(0x2000)
+#define IB_PMA_SEL_PORT_XMIT_PACKETS		cpu_to_be16(0x4000)
+#define IB_PMA_SEL_PORT_RCV_PACKETS		cpu_to_be16(0x8000)
 
 struct ib_pma_portcounters_ext {
 	u8 reserved;
@@ -913,14 +913,14 @@ struct ib_pma_portcounters_ext {
 	__be64 port_multicast_rcv_packets;
 } __attribute__ ((packed));
 
-#define IB_PMA_SELX_PORT_XMIT_DATA		__constant_htons(0x0001)
-#define IB_PMA_SELX_PORT_RCV_DATA		__constant_htons(0x0002)
-#define IB_PMA_SELX_PORT_XMIT_PACKETS		__constant_htons(0x0004)
-#define IB_PMA_SELX_PORT_RCV_PACKETS		__constant_htons(0x0008)
-#define IB_PMA_SELX_PORT_UNI_XMIT_PACKETS	__constant_htons(0x0010)
-#define IB_PMA_SELX_PORT_UNI_RCV_PACKETS	__constant_htons(0x0020)
-#define IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS	__constant_htons(0x0040)
-#define IB_PMA_SELX_PORT_MULTI_RCV_PACKETS	__constant_htons(0x0080)
+#define IB_PMA_SELX_PORT_XMIT_DATA		cpu_to_be16(0x0001)
+#define IB_PMA_SELX_PORT_RCV_DATA		cpu_to_be16(0x0002)
+#define IB_PMA_SELX_PORT_XMIT_PACKETS		cpu_to_be16(0x0004)
+#define IB_PMA_SELX_PORT_RCV_PACKETS		cpu_to_be16(0x0008)
+#define IB_PMA_SELX_PORT_UNI_XMIT_PACKETS	cpu_to_be16(0x0010)
+#define IB_PMA_SELX_PORT_UNI_RCV_PACKETS	cpu_to_be16(0x0020)
+#define IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS	cpu_to_be16(0x0040)
+#define IB_PMA_SELX_PORT_MULTI_RCV_PACKETS	cpu_to_be16(0x0080)
 
 static int recv_pma_get_classportinfo(struct ib_perf *pmp)
 {
@@ -933,7 +933,7 @@ static int recv_pma_get_classportinfo(struct ib_perf *pmp)
 		pmp->status |= IB_SMP_INVALID_FIELD;
 
 	/* Indicate AllPortSelect is valid (only one port anyway) */
-	p->cap_mask = __constant_cpu_to_be16(1 << 8);
+	p->cap_mask = cpu_to_be16(1 << 8);
 	p->base_version = 1;
 	p->class_version = 1;
 	/*
@@ -951,12 +951,11 @@ static int recv_pma_get_classportinfo(struct ib_perf *pmp)
  * We support 5 counters which only count the mandatory quantities.
  */
 #define COUNTER_MASK(q, n) (q << ((9 - n) * 3))
-#define COUNTER_MASK0_9 \
-	__constant_cpu_to_be32(COUNTER_MASK(1, 0) | \
-			       COUNTER_MASK(1, 1) | \
-			       COUNTER_MASK(1, 2) | \
-			       COUNTER_MASK(1, 3) | \
-			       COUNTER_MASK(1, 4))
+#define COUNTER_MASK0_9 cpu_to_be32(COUNTER_MASK(1, 0) | \
+				    COUNTER_MASK(1, 1) | \
+				    COUNTER_MASK(1, 2) | \
+				    COUNTER_MASK(1, 3) | \
+				    COUNTER_MASK(1, 4))
 
 static int recv_pma_get_portsamplescontrol(struct ib_perf *pmp,
 					   struct ib_device *ibdev, u8 port)
@@ -1137,7 +1136,7 @@ static int recv_pma_get_portsamplesresult_ext(struct ib_perf *pmp,
 	status = dev->pma_sample_status;
 	p->sample_status = cpu_to_be16(status);
 	/* 64 bits */
-	p->extended_width = __constant_cpu_to_be32(0x80000000);
+	p->extended_width = cpu_to_be32(0x80000000);
 	for (i = 0; i < ARRAY_SIZE(dev->pma_counter_select); i++)
 		p->counter[i] = (status != IB_PMA_SAMPLE_STATUS_DONE) ? 0 :
 			cpu_to_be64(
@@ -1185,7 +1184,7 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp,
 		pmp->status |= IB_SMP_INVALID_FIELD;
 
 	if (cntrs.symbol_error_counter > 0xFFFFUL)
-		p->symbol_error_counter = __constant_cpu_to_be16(0xFFFF);
+		p->symbol_error_counter = cpu_to_be16(0xFFFF);
 	else
 		p->symbol_error_counter =
 			cpu_to_be16((u16)cntrs.symbol_error_counter);
@@ -1199,17 +1198,17 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp,
 	else
 		p->link_downed_counter = (u8)cntrs.link_downed_counter;
 	if (cntrs.port_rcv_errors > 0xFFFFUL)
-		p->port_rcv_errors = __constant_cpu_to_be16(0xFFFF);
+		p->port_rcv_errors = cpu_to_be16(0xFFFF);
 	else
 		p->port_rcv_errors =
 			cpu_to_be16((u16) cntrs.port_rcv_errors);
 	if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
-		p->port_rcv_remphys_errors = __constant_cpu_to_be16(0xFFFF);
+		p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
 	else
 		p->port_rcv_remphys_errors =
 			cpu_to_be16((u16)cntrs.port_rcv_remphys_errors);
 	if (cntrs.port_xmit_discards > 0xFFFFUL)
-		p->port_xmit_discards = __constant_cpu_to_be16(0xFFFF);
+		p->port_xmit_discards = cpu_to_be16(0xFFFF);
 	else
 		p->port_xmit_discards =
 			cpu_to_be16((u16)cntrs.port_xmit_discards);
@@ -1220,24 +1219,24 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp,
 	p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) |
 		cntrs.excessive_buffer_overrun_errors;
 	if (cntrs.vl15_dropped > 0xFFFFUL)
-		p->vl15_dropped = __constant_cpu_to_be16(0xFFFF);
+		p->vl15_dropped = cpu_to_be16(0xFFFF);
 	else
 		p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
 	if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
-		p->port_xmit_data = __constant_cpu_to_be32(0xFFFFFFFF);
+		p->port_xmit_data = cpu_to_be32(0xFFFFFFFF);
 	else
 		p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data);
 	if (cntrs.port_rcv_data > 0xFFFFFFFFUL)
-		p->port_rcv_data = __constant_cpu_to_be32(0xFFFFFFFF);
+		p->port_rcv_data = cpu_to_be32(0xFFFFFFFF);
 	else
 		p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data);
 	if (cntrs.port_xmit_packets > 0xFFFFFFFFUL)
-		p->port_xmit_packets = __constant_cpu_to_be32(0xFFFFFFFF);
+		p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF);
 	else
 		p->port_xmit_packets =
 			cpu_to_be32((u32)cntrs.port_xmit_packets);
 	if (cntrs.port_rcv_packets > 0xFFFFFFFFUL)
-		p->port_rcv_packets = __constant_cpu_to_be32(0xFFFFFFFF);
+		p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF);
 	else
 		p->port_rcv_packets =
 			cpu_to_be32((u32) cntrs.port_rcv_packets);
......
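The portcounters hunks all repeat one pattern: saturate a wide software counter to the width of its fixed PMA field, then store it big-endian; dropping `__constant_` changes none of that. A standalone userspace reduction of the pattern (glibc `htobe16()` in place of `cpu_to_be16()`):

	#include <endian.h>
	#include <stdint.h>
	#include <stdio.h>

	/* clamp a 64-bit counter into a 16-bit big-endian PMA field */
	static uint16_t pma_saturate16(uint64_t cnt)
	{
		return htobe16(cnt > 0xFFFFULL ? 0xFFFF : (uint16_t)cnt);
	}

	int main(void)
	{
		printf("%#x %#x\n", pma_saturate16(42),
		       pma_saturate16(1ULL << 20));	/* saturates to 0xffff */
		return 0;
	}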
@@ -1744,7 +1744,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
 		/* Signal completion event if the solicited bit is set. */
 		ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
 			       (ohdr->bth[0] &
-				__constant_cpu_to_be32(1 << 23)) != 0);
+				cpu_to_be32(1 << 23)) != 0);
 		break;
 
 	case OP(RDMA_WRITE_FIRST):
......
@@ -781,10 +781,10 @@ int ipath_sdma_verbs_send(struct ipath_devdata *dd,
 		descqp = &dd->ipath_sdma_descq[dd->ipath_sdma_descq_cnt].qw[0];
 	descqp -= 2;
 	/* SDmaLastDesc */
-	descqp[0] |= __constant_cpu_to_le64(1ULL << 11);
+	descqp[0] |= cpu_to_le64(1ULL << 11);
 	if (tx