Commit 1d248b25 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (26 commits)
  IB/mlx4: Set ownership bit correctly when copying CQEs during CQ resize
  RDMA/nes: Remove tx_free_list
  RDMA/cma: Add IPv6 support
  RDMA/addr: Add support for translating IPv6 addresses
  mlx4_core: Delete incorrect comment
  mlx4_core: Add support for multiple completion event vectors
  IB/iser: Avoid recv buffer exhaustion caused by unexpected PDUs
  IB/ehca: Remove redundant test of vpage
  IB/ehca: Replace modulus operations in flush error completion path
  IB/ipath: Add locking for interrupt use of ipath_pd contexts vs free
  IB/ipath: Fix spi_pioindex value
  IB/ipath: Only do 1X workaround on rev1 chips
  IB/ipath: Don't count IB symbol and link errors unless link is UP
  IB/ipath: Check return value of dma_map_single()
  IB/ipath: Fix PSN of send WQEs after an RDMA read resend
  RDMA/nes: Cleanup warnings
  RDMA/nes: Add loopback check to make_cm_node()
  RDMA/nes: Check cqp_avail_reqs is empty after locking the list
  RDMA/nes: Fix TCP compliance test failures
  RDMA/nes: Forward packets for a new connection with stale APBVT entry
  ...
parents 1db2a5c1 2a0d8366
@@ -41,6 +41,8 @@
 #include <net/neighbour.h>
 #include <net/route.h>
 #include <net/netevent.h>
+#include <net/addrconf.h>
+#include <net/ip6_route.h>
 #include <rdma/ib_addr.h>

 MODULE_AUTHOR("Sean Hefty");
@@ -49,8 +51,8 @@ MODULE_LICENSE("Dual BSD/GPL");
 struct addr_req {
 	struct list_head list;
-	struct sockaddr src_addr;
-	struct sockaddr dst_addr;
+	struct sockaddr_storage src_addr;
+	struct sockaddr_storage dst_addr;
 	struct rdma_dev_addr *addr;
 	struct rdma_addr_client *client;
 	void *context;
@@ -113,15 +115,32 @@ EXPORT_SYMBOL(rdma_copy_addr);
 int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
 {
 	struct net_device *dev;
-	__be32 ip = ((struct sockaddr_in *) addr)->sin_addr.s_addr;
-	int ret;
+	int ret = -EADDRNOTAVAIL;

-	dev = ip_dev_find(&init_net, ip);
-	if (!dev)
-		return -EADDRNOTAVAIL;
+	switch (addr->sa_family) {
+	case AF_INET:
+		dev = ip_dev_find(&init_net,
+			((struct sockaddr_in *) addr)->sin_addr.s_addr);
+		if (!dev)
+			return ret;

-	ret = rdma_copy_addr(dev_addr, dev, NULL);
-	dev_put(dev);
+		ret = rdma_copy_addr(dev_addr, dev, NULL);
+		dev_put(dev);
+		break;
+	case AF_INET6:
+		for_each_netdev(&init_net, dev) {
+			if (ipv6_chk_addr(&init_net,
+					  &((struct sockaddr_in6 *) addr)->sin6_addr,
+					  dev, 1)) {
+				ret = rdma_copy_addr(dev_addr, dev, NULL);
+				break;
+			}
+		}
+		break;
+	default:
+		break;
+	}
 	return ret;
 }
 EXPORT_SYMBOL(rdma_translate_ip);
@@ -156,22 +175,37 @@ static void queue_req(struct addr_req *req)
 	mutex_unlock(&lock);
 }

-static void addr_send_arp(struct sockaddr_in *dst_in)
+static void addr_send_arp(struct sockaddr *dst_in)
 {
 	struct rtable *rt;
 	struct flowi fl;
-	__be32 dst_ip = dst_in->sin_addr.s_addr;
+	struct dst_entry *dst;

 	memset(&fl, 0, sizeof fl);
-	fl.nl_u.ip4_u.daddr = dst_ip;
-	if (ip_route_output_key(&init_net, &rt, &fl))
-		return;

-	neigh_event_send(rt->u.dst.neighbour, NULL);
-	ip_rt_put(rt);
+	if (dst_in->sa_family == AF_INET) {
+		fl.nl_u.ip4_u.daddr =
+			((struct sockaddr_in *) dst_in)->sin_addr.s_addr;
+		if (ip_route_output_key(&init_net, &rt, &fl))
+			return;
+		neigh_event_send(rt->u.dst.neighbour, NULL);
+		ip_rt_put(rt);
+	} else {
+		fl.nl_u.ip6_u.daddr =
+			((struct sockaddr_in6 *) dst_in)->sin6_addr;
+		dst = ip6_route_output(&init_net, NULL, &fl);
+		if (!dst)
+			return;
+		neigh_event_send(dst->neighbour, NULL);
+		dst_release(dst);
+	}
 }

-static int addr_resolve_remote(struct sockaddr_in *src_in,
+static int addr4_resolve_remote(struct sockaddr_in *src_in,
 			       struct sockaddr_in *dst_in,
 			       struct rdma_dev_addr *addr)
 {
@@ -220,10 +254,51 @@ static int addr_resolve_remote(struct sockaddr_in *src_in,
 	return ret;
 }

+static int addr6_resolve_remote(struct sockaddr_in6 *src_in,
+				struct sockaddr_in6 *dst_in,
+				struct rdma_dev_addr *addr)
+{
+	struct flowi fl;
+	struct neighbour *neigh;
+	struct dst_entry *dst;
+	int ret = -ENODATA;
+
+	memset(&fl, 0, sizeof fl);
+	fl.nl_u.ip6_u.daddr = dst_in->sin6_addr;
+	fl.nl_u.ip6_u.saddr = src_in->sin6_addr;
+
+	dst = ip6_route_output(&init_net, NULL, &fl);
+	if (!dst)
+		return ret;
+
+	if (dst->dev->flags & IFF_NOARP) {
+		ret = rdma_copy_addr(addr, dst->dev, NULL);
+	} else {
+		neigh = dst->neighbour;
+		if (neigh && (neigh->nud_state & NUD_VALID))
+			ret = rdma_copy_addr(addr, neigh->dev, neigh->ha);
+	}
+
+	dst_release(dst);
+	return ret;
+}
+
+static int addr_resolve_remote(struct sockaddr *src_in,
+			       struct sockaddr *dst_in,
+			       struct rdma_dev_addr *addr)
+{
+	if (src_in->sa_family == AF_INET) {
+		return addr4_resolve_remote((struct sockaddr_in *) src_in,
+			(struct sockaddr_in *) dst_in, addr);
+	} else
+		return addr6_resolve_remote((struct sockaddr_in6 *) src_in,
+			(struct sockaddr_in6 *) dst_in, addr);
+}
+
 static void process_req(struct work_struct *work)
 {
 	struct addr_req *req, *temp_req;
-	struct sockaddr_in *src_in, *dst_in;
+	struct sockaddr *src_in, *dst_in;
 	struct list_head done_list;

 	INIT_LIST_HEAD(&done_list);
@@ -231,8 +306,8 @@ static void process_req(struct work_struct *work)
 	mutex_lock(&lock);
 	list_for_each_entry_safe(req, temp_req, &req_list, list) {
 		if (req->status == -ENODATA) {
-			src_in = (struct sockaddr_in *) &req->src_addr;
-			dst_in = (struct sockaddr_in *) &req->dst_addr;
+			src_in = (struct sockaddr *) &req->src_addr;
+			dst_in = (struct sockaddr *) &req->dst_addr;
 			req->status = addr_resolve_remote(src_in, dst_in,
 							  req->addr);
 			if (req->status && time_after_eq(jiffies, req->timeout))
@@ -251,41 +326,72 @@ static void process_req(struct work_struct *work)
 	list_for_each_entry_safe(req, temp_req, &done_list, list) {
 		list_del(&req->list);
-		req->callback(req->status, &req->src_addr, req->addr,
-			      req->context);
+		req->callback(req->status, (struct sockaddr *) &req->src_addr,
+			      req->addr, req->context);
 		put_client(req->client);
 		kfree(req);
 	}
 }

-static int addr_resolve_local(struct sockaddr_in *src_in,
-			      struct sockaddr_in *dst_in,
+static int addr_resolve_local(struct sockaddr *src_in,
+			      struct sockaddr *dst_in,
 			      struct rdma_dev_addr *addr)
 {
 	struct net_device *dev;
-	__be32 src_ip = src_in->sin_addr.s_addr;
-	__be32 dst_ip = dst_in->sin_addr.s_addr;
 	int ret;

-	dev = ip_dev_find(&init_net, dst_ip);
-	if (!dev)
-		return -EADDRNOTAVAIL;
-
-	if (ipv4_is_zeronet(src_ip)) {
-		src_in->sin_family = dst_in->sin_family;
-		src_in->sin_addr.s_addr = dst_ip;
-		ret = rdma_copy_addr(addr, dev, dev->dev_addr);
-	} else if (ipv4_is_loopback(src_ip)) {
-		ret = rdma_translate_ip((struct sockaddr *) dst_in, addr);
-		if (!ret)
-			memcpy(addr->dst_dev_addr, dev->dev_addr, MAX_ADDR_LEN);
+	if (dst_in->sa_family == AF_INET) {
+		__be32 src_ip = ((struct sockaddr_in *) src_in)->sin_addr.s_addr;
+		__be32 dst_ip = ((struct sockaddr_in *) dst_in)->sin_addr.s_addr;
+
+		dev = ip_dev_find(&init_net, dst_ip);
+		if (!dev)
+			return -EADDRNOTAVAIL;
+
+		if (ipv4_is_zeronet(src_ip)) {
+			src_in->sa_family = dst_in->sa_family;
+			((struct sockaddr_in *) src_in)->sin_addr.s_addr = dst_ip;
+			ret = rdma_copy_addr(addr, dev, dev->dev_addr);
+		} else if (ipv4_is_loopback(src_ip)) {
+			ret = rdma_translate_ip(dst_in, addr);
+			if (!ret)
+				memcpy(addr->dst_dev_addr, dev->dev_addr, MAX_ADDR_LEN);
+		} else {
+			ret = rdma_translate_ip(src_in, addr);
+			if (!ret)
+				memcpy(addr->dst_dev_addr, dev->dev_addr, MAX_ADDR_LEN);
+		}
+		dev_put(dev);
 	} else {
-		ret = rdma_translate_ip((struct sockaddr *) src_in, addr);
-		if (!ret)
-			memcpy(addr->dst_dev_addr, dev->dev_addr, MAX_ADDR_LEN);
+		struct in6_addr *a;
+
+		for_each_netdev(&init_net, dev)
+			if (ipv6_chk_addr(&init_net,
+					  &((struct sockaddr_in6 *) addr)->sin6_addr,
+					  dev, 1))
+				break;
+
+		if (!dev)
+			return -EADDRNOTAVAIL;
+
+		a = &((struct sockaddr_in6 *) src_in)->sin6_addr;
+
+		if (ipv6_addr_any(a)) {
+			src_in->sa_family = dst_in->sa_family;
+			((struct sockaddr_in6 *) src_in)->sin6_addr =
+				((struct sockaddr_in6 *) dst_in)->sin6_addr;
+			ret = rdma_copy_addr(addr, dev, dev->dev_addr);
+		} else if (ipv6_addr_loopback(a)) {
+			ret = rdma_translate_ip(dst_in, addr);
+			if (!ret)
+				memcpy(addr->dst_dev_addr, dev->dev_addr, MAX_ADDR_LEN);
+		} else {
+			ret = rdma_translate_ip(src_in, addr);
+			if (!ret)
+				memcpy(addr->dst_dev_addr, dev->dev_addr, MAX_ADDR_LEN);
+		}
 	}
-	dev_put(dev);

 	return ret;
 }
@@ -296,7 +402,7 @@ int rdma_resolve_ip(struct rdma_addr_client *client,
 				     struct rdma_dev_addr *addr, void *context),
 		    void *context)
 {
-	struct sockaddr_in *src_in, *dst_in;
+	struct sockaddr *src_in, *dst_in;
 	struct addr_req *req;
 	int ret = 0;
@@ -313,8 +419,8 @@ int rdma_resolve_ip(struct rdma_addr_client *client,
 	req->client = client;
 	atomic_inc(&client->refcount);

-	src_in = (struct sockaddr_in *) &req->src_addr;
-	dst_in = (struct sockaddr_in *) &req->dst_addr;
+	src_in = (struct sockaddr *) &req->src_addr;
+	dst_in = (struct sockaddr *) &req->dst_addr;

 	req->status = addr_resolve_local(src_in, dst_in, addr);
 	if (req->status == -EADDRNOTAVAIL)
......
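The addr.c hunks above all rely on one idiom: requests are stored in a family-agnostic struct sockaddr_storage (large and aligned enough for either family), and every consumer dispatches on sa_family before casting to sockaddr_in or sockaddr_in6. A minimal userspace sketch of the same idiom follows; this is standalone C, not kernel code, and print_ip() is a made-up helper for illustration only.

/*
 * Userspace sketch of the sockaddr_storage + sa_family dispatch idiom
 * used by addr.c above. print_ip() is hypothetical, not part of the patch.
 */
#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <sys/socket.h>

static void print_ip(const struct sockaddr *sa)
{
	char buf[INET6_ADDRSTRLEN] = "unknown family";

	switch (sa->sa_family) {
	case AF_INET:
		inet_ntop(AF_INET, &((const struct sockaddr_in *) sa)->sin_addr,
			  buf, sizeof buf);
		break;
	case AF_INET6:
		inet_ntop(AF_INET6, &((const struct sockaddr_in6 *) sa)->sin6_addr,
			  buf, sizeof buf);
		break;
	}
	printf("%s\n", buf);
}

int main(void)
{
	struct sockaddr_storage ss;	/* big enough for either family */
	struct sockaddr_in6 *v6 = (struct sockaddr_in6 *) &ss;

	memset(&ss, 0, sizeof ss);
	v6->sin6_family = AF_INET6;
	inet_pton(AF_INET6, "::1", &v6->sin6_addr);
	print_ip((struct sockaddr *) &ss);	/* prints ::1 */
	return 0;
}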
@@ -42,6 +42,7 @@
 #include <linux/inetdevice.h>
 #include <net/tcp.h>
+#include <net/ipv6.h>
 #include <rdma/rdma_cm.h>
 #include <rdma/rdma_cm_ib.h>
@@ -636,7 +637,12 @@ static inline int cma_zero_addr(struct sockaddr *addr)
 static inline int cma_loopback_addr(struct sockaddr *addr)
 {
-	return ipv4_is_loopback(((struct sockaddr_in *) addr)->sin_addr.s_addr);
+	if (addr->sa_family == AF_INET)
+		return ipv4_is_loopback(
+			((struct sockaddr_in *) addr)->sin_addr.s_addr);
+	else
+		return ipv6_addr_loopback(
+			&((struct sockaddr_in6 *) addr)->sin6_addr);
 }

 static inline int cma_any_addr(struct sockaddr *addr)
@@ -1467,10 +1473,10 @@ static void cma_listen_on_all(struct rdma_id_private *id_priv)
 static int cma_bind_any(struct rdma_cm_id *id, sa_family_t af)
 {
-	struct sockaddr_in addr_in;
+	struct sockaddr_storage addr_in;

 	memset(&addr_in, 0, sizeof addr_in);
-	addr_in.sin_family = af;
+	addr_in.ss_family = af;
 	return rdma_bind_addr(id, (struct sockaddr *) &addr_in);
 }
@@ -2073,7 +2079,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
 	struct rdma_id_private *id_priv;
 	int ret;

-	if (addr->sa_family != AF_INET)
+	if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6)
 		return -EAFNOSUPPORT;

 	id_priv = container_of(id, struct rdma_id_private, id);
@@ -2113,10 +2119,12 @@ EXPORT_SYMBOL(rdma_bind_addr);
 static int cma_format_hdr(void *hdr, enum rdma_port_space ps,
 			  struct rdma_route *route)
 {
-	struct sockaddr_in *src4, *dst4;
 	struct cma_hdr *cma_hdr;
 	struct sdp_hh *sdp_hdr;

-	src4 = (struct sockaddr_in *) &route->addr.src_addr;
-	dst4 = (struct sockaddr_in *) &route->addr.dst_addr;
+	if (route->addr.src_addr.ss_family == AF_INET) {
+		struct sockaddr_in *src4, *dst4;
+
+		src4 = (struct sockaddr_in *) &route->addr.src_addr;
+		dst4 = (struct sockaddr_in *) &route->addr.dst_addr;
@@ -2139,6 +2147,32 @@ static int cma_format_hdr(void *hdr, enum rdma_port_space ps,
 			cma_hdr->port = src4->sin_port;
 			break;
 		}
+	} else {
+		struct sockaddr_in6 *src6, *dst6;
+
+		src6 = (struct sockaddr_in6 *) &route->addr.src_addr;
+		dst6 = (struct sockaddr_in6 *) &route->addr.dst_addr;
+
+		switch (ps) {
+		case RDMA_PS_SDP:
+			sdp_hdr = hdr;
+			if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION)
+				return -EINVAL;
+			sdp_set_ip_ver(sdp_hdr, 6);
+			sdp_hdr->src_addr.ip6 = src6->sin6_addr;
+			sdp_hdr->dst_addr.ip6 = dst6->sin6_addr;
+			sdp_hdr->port = src6->sin6_port;
+			break;
+		default:
+			cma_hdr = hdr;
+			cma_hdr->cma_version = CMA_VERSION;
+			cma_set_ip_ver(cma_hdr, 6);
+			cma_hdr->src_addr.ip6 = src6->sin6_addr;
+			cma_hdr->dst_addr.ip6 = dst6->sin6_addr;
+			cma_hdr->port = src6->sin6_port;
+			break;
+		}
+	}
+
 	return 0;
 }
......
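In cma_format_hdr() above, cma_set_ip_ver() and sdp_set_ip_ver() record which family the private-data header carries by packing the IP version (4 or 6) into the high nibble of a header byte. A standalone sketch of that nibble packing follows; the field layout here is assumed for illustration, and the authoritative definitions live in cma.c and sdp.h.

/*
 * Sketch of the ip-version nibble packing performed by cma_set_ip_ver()
 * and sdp_set_ip_ver(). Layout is illustrative, not the exact header.
 */
#include <assert.h>
#include <stdint.h>

static uint8_t set_ip_ver(uint8_t field, uint8_t ver)
{
	return (uint8_t) ((field & 0x0F) | (ver << 4));	/* version in bits 7:4 */
}

static uint8_t get_ip_ver(uint8_t field)
{
	return field >> 4;
}

int main(void)
{
	uint8_t f = 0;

	f = set_ip_ver(f, 6);		/* IPv6 header, as in the else branch above */
	assert(get_ip_ver(f) == 6);
	f = set_ip_ver(f, 4);
	assert(get_ip_ver(f) == 4);
	return 0;
}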
@@ -175,6 +175,13 @@ struct ehca_queue_map {
 	unsigned int next_wqe_idx;   /* Idx to first wqe to be flushed */
 };

+/* function to calculate the next index for the qmap */
+static inline unsigned int next_index(unsigned int cur_index, unsigned int limit)
+{
+	unsigned int temp = cur_index + 1;
+	return (temp == limit) ? 0 : temp;
+}
+
 struct ehca_qp {
 	union {
 		struct ib_qp ib_qp;
......
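The next_index() helper added above replaces a modulus with a compare-and-reset, which avoids an integer divide on the flush error completion path (see the ehca_qp.c and ehca_reqs.c hunks below). A standalone check that the two forms agree whenever cur_index stays below limit, which is the queue-map invariant:

/* Verify next_index() matches the modulus it replaces, for valid indices. */
#include <assert.h>

static inline unsigned int next_index(unsigned int cur_index, unsigned int limit)
{
	unsigned int temp = cur_index + 1;
	return (temp == limit) ? 0 : temp;
}

int main(void)
{
	const unsigned int entries = 5;	/* arbitrary queue size for the check */
	unsigned int i;

	for (i = 0; i < entries; i++)
		assert(next_index(i, entries) == (i + 1) % entries);
	return 0;
}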
@@ -113,7 +113,7 @@ int ehca_create_eq(struct ehca_shca *shca,
 			if (h_ret != H_SUCCESS || vpage)
 				goto create_eq_exit2;
 		} else {
-			if (h_ret != H_PAGE_REGISTERED || !vpage)
+			if (h_ret != H_PAGE_REGISTERED)
 				goto create_eq_exit2;
 		}
 	}
......
@@ -717,6 +717,7 @@ static int __devinit ehca_probe(struct of_device *dev,
 	const u64 *handle;
 	struct ib_pd *ibpd;
 	int ret, i, eq_size;
+	unsigned long flags;

 	handle = of_get_property(dev->node, "ibm,hca-handle", NULL);
 	if (!handle) {
@@ -830,9 +831,9 @@ static int __devinit ehca_probe(struct of_device *dev,
 		ehca_err(&shca->ib_device,
 			 "Cannot create device attributes ret=%d", ret);

-	spin_lock(&shca_list_lock);
+	spin_lock_irqsave(&shca_list_lock, flags);
 	list_add(&shca->shca_list, &shca_list);
-	spin_unlock(&shca_list_lock);
+	spin_unlock_irqrestore(&shca_list_lock, flags);

 	return 0;
@@ -878,6 +879,7 @@ static int __devinit ehca_probe(struct of_device *dev,
 static int __devexit ehca_remove(struct of_device *dev)
 {
 	struct ehca_shca *shca = dev->dev.driver_data;
+	unsigned long flags;
 	int ret;

 	sysfs_remove_group(&dev->dev.kobj, &ehca_dev_attr_grp);
@@ -915,9 +917,9 @@ static int __devexit ehca_remove(struct of_device *dev)
 	ib_dealloc_device(&shca->ib_device);

-	spin_lock(&shca_list_lock);
+	spin_lock_irqsave(&shca_list_lock, flags);
 	list_del(&shca->shca_list);
-	spin_unlock(&shca_list_lock);
+	spin_unlock_irqrestore(&shca_list_lock, flags);

 	return ret;
 }
@@ -975,6 +977,7 @@ static int ehca_mem_notifier(struct notifier_block *nb,
 			     unsigned long action, void *data)
 {
 	static unsigned long ehca_dmem_warn_time;
+	unsigned long flags;

 	switch (action) {
 	case MEM_CANCEL_OFFLINE:
@@ -985,12 +988,12 @@ static int ehca_mem_notifier(struct notifier_block *nb,
 	case MEM_GOING_ONLINE:
 	case MEM_GOING_OFFLINE:
 		/* only ok if no hca is attached to the lpar */
-		spin_lock(&shca_list_lock);
+		spin_lock_irqsave(&shca_list_lock, flags);
 		if (list_empty(&shca_list)) {
-			spin_unlock(&shca_list_lock);
+			spin_unlock_irqrestore(&shca_list_lock, flags);
 			return NOTIFY_OK;
 		} else {
-			spin_unlock(&shca_list_lock);
+			spin_unlock_irqrestore(&shca_list_lock, flags);
 			if (printk_timed_ratelimit(&ehca_dmem_warn_time,
 						   30 * 1000))
 				ehca_gen_err("DMEM operations are not allowed"
......
@@ -1138,14 +1138,14 @@ static int calc_left_cqes(u64 wqe_p, struct ipz_queue *ipz_queue,
 		return -EFAULT;
 	}

-	tail_idx = (qmap->tail + 1) % qmap->entries;
+	tail_idx = next_index(qmap->tail, qmap->entries);
 	wqe_idx = q_ofs / ipz_queue->qe_size;

 	/* check all processed wqes, whether a cqe is requested or not */
 	while (tail_idx != wqe_idx) {
 		if (qmap->map[tail_idx].cqe_req)
 			qmap->left_to_poll++;
-		tail_idx = (tail_idx + 1) % qmap->entries;
+		tail_idx = next_index(tail_idx, qmap->entries);
 	}
 	/* save index in queue, where we have to start flushing */
 	qmap->next_wqe_idx = wqe_idx;
@@ -1195,14 +1195,14 @@ static int check_for_left_cqes(struct ehca_qp *my_qp, struct ehca_shca *shca)
 	} else {
 		spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
 		my_qp->sq_map.left_to_poll = 0;
-		my_qp->sq_map.next_wqe_idx = (my_qp->sq_map.tail + 1) %
-						my_qp->sq_map.entries;
+		my_qp->sq_map.next_wqe_idx = next_index(my_qp->sq_map.tail,
+							my_qp->sq_map.entries);
 		spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);

 		spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
 		my_qp->rq_map.left_to_poll = 0;
-		my_qp->rq_map.next_wqe_idx = (my_qp->rq_map.tail + 1) %
-						my_qp->rq_map.entries;
+		my_qp->rq_map.next_wqe_idx = next_index(my_qp->rq_map.tail,
+							my_qp->rq_map.entries);
 		spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags);
 	}
......
@@ -726,13 +726,13 @@ static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc)
 		 * set left_to_poll to 0 because in error state, we will not
 		 * get any additional CQEs
 		 */
-		my_qp->sq_map.next_wqe_idx = (my_qp->sq_map.tail + 1) %
-						my_qp->sq_map.entries;
+		my_qp->sq_map.next_wqe_idx = next_index(my_qp->sq_map.tail,
+							my_qp->sq_map.entries);
 		my_qp->sq_map.left_to_poll = 0;
 		ehca_add_to_err_list(my_qp, 1);

-		my_qp->rq_map.next_wqe_idx = (my_qp->rq_map.tail + 1) %
-						my_qp->rq_map.entries;
+		my_qp->rq_map.next_wqe_idx = next_index(my_qp->rq_map.tail,
+							my_qp->rq_map.entries);
 		my_qp->rq_map.left_to_poll = 0;
 		if (HAS_RQ(my_qp))
 			ehca_add_to_err_list(my_qp, 0);
@@ -860,9 +860,8 @@ static int generate_flush_cqes(struct ehca_qp *my_qp, struct ib_cq *cq,
 		/* mark as reported and advance next_wqe pointer */
 		qmap_entry->reported = 1;

-		qmap->next_wqe_idx++;
-		if (qmap->next_wqe_idx == qmap->entries)
-			qmap->next_wqe_idx = 0;
+		qmap->next_wqe_idx = next_index(qmap->next_wqe_idx,
+						qmap->entries);
 		qmap_entry = &qmap->map[qmap->next_wqe_idx];

 		wc++; nr++;
......
@@ -661,6 +661,8 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
 static void __devexit cleanup_device(struct ipath_devdata *dd)
 {
 	int port;
+	struct ipath_portdata **tmp;
+	unsigned long flags;

 	if (*dd->ipath_statusp & IPATH_STATUS_CHIP_PRESENT) {
 		/* can't do anything more with chip; needs re-init */
@@ -742,20 +744,21 @@ static void __devexit cleanup_device(struct ipath_devdata *dd)
 	/*
 	 * free any resources still in use (usually just kernel ports)
-	 * at unload; we do for portcnt, not cfgports, because cfgports
-	 * could have changed while we were loaded.
+	 * at unload; we do for portcnt, because that's what we allocate.
+	 * We acquire lock to be really paranoid that ipath_pd isn't being
+	 * accessed from some interrupt-related code (that should not happen,
+	 * but best to be sure).
 	 */
+	spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
+	tmp = dd->ipath_pd;
+	dd->ipath_pd = NULL;
+	spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
 	for (port = 0; port < dd->ipath_portcnt; port++) {
-		struct ipath_portdata *pd = dd->ipath_pd[port];
-		dd->ipath_pd[port] = NULL;
+		struct ipath_portdata *pd = tmp[port];
+		tmp[port] = NULL; /* debugging paranoia */
 		ipath_free_pddata(dd, pd);
 	}
-	kfree(dd->ipath_pd);
-	/*
-	 * debuggability, in case some cleanup path tries to use it
-	 * after this
-	 */
-	dd->ipath_pd = NULL;
+	kfree(tmp);
 }

 static void __devexit ipath_remove_one(struct pci_dev *pdev)
@@ -2586,6 +2589,7 @@ int ipath_reset_device(int unit)
 {
 	int ret, i;
 	struct ipath_devdata *dd = ipath_lookup(unit);
+	unsigned long flags;

 	if (!dd) {
 		ret = -ENODEV;
@@ -2611,9 +2615,12 @@ int ipath_reset_device(int unit)
 		goto bail;
 	}

+	spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
 	if (dd->ipath_pd)
 		for (i = 1; i < dd->ipath_cfgports; i++) {
-			if (dd->ipath_pd[i] && dd->ipath_pd[i]->port_cnt) {
+			if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt)
+				continue;
+			spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
 			ipath_dbg("unit %u port %d is in use "
 				  "(PID %u cmd %s), can't reset\n",
 				  unit, i,
@@ -2622,7 +2629,7 @@ int ipath_reset_device(int unit)
 			ret = -EBUSY;
 			goto bail;
 		}
-	}
+	spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);

 	if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
 		teardown_sdma(dd);
@@ -2656,9 +2663,12 @@ static int ipath_signal_procs(struct ipath_devdata *dd, int sig)
 {
 	int i, sub, any = 0;
 	struct pid *pid;
+	unsigned long flags;

 	if (!dd->ipath_pd)
 		return 0;
+
+	spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
 	for (i = 1; i < dd->ipath_cfgports; i++) {
 		if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt)
 			continue;
@@ -2682,6 +2692,7 @@ static int ipath_signal_procs(struct ipath_devdata *dd, int sig)
 			any++;
 		}
 	}
+	spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
 	return any;
 }
......
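The cleanup_device() change above is a classic detach-then-free: the ipath_pd table is unhooked from the device under ipath_uctxt_lock, so interrupt-side readers see either the full table or NULL, and the actual freeing happens only after the lock is dropped. A userspace sketch of the same pattern using a pthread mutex; the names here are illustrative, not kernel APIs:

/*
 * Sketch of the detach-under-lock, free-outside-lock pattern from
 * cleanup_device() above, transplanted to userspace with pthreads.
 */
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static int **table;			/* shared; read under table_lock */

static void table_init(unsigned int n)
{
	table = calloc(n, sizeof(*table));
}

static void table_destroy(unsigned int n)
{
	int **tmp;
	unsigned int i;

	/* detach under the lock ... */
	pthread_mutex_lock(&table_lock);
	tmp = table;
	table = NULL;
	pthread_mutex_unlock(&table_lock);

	/* ... and free outside it, like the kfree(tmp) above */
	for (i = 0; i < n; i++)
		free(tmp[i]);
	free(tmp);
}

int main(void)
{
	table_init(4);
	table_destroy(4);
	return 0;
}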
@@ -223,8 +223,13 @@ static int ipath_get_base_info(struct file *fp,
 			(unsigned long long) kinfo->spi_subport_rcvhdr_base);
 	}

-	kinfo->spi_pioindex = (kinfo->spi_piobufbase - dd->ipath_piobufbase) /
-		dd->ipath_palign;
+	/*
+	 * All user buffers are 2KB buffers.  If we ever support
+	 * giving 4KB buffers to user processes, this will need some
+	 * work.
+	 */
+	kinfo->spi_pioindex = (kinfo->spi_piobufbase -
+		(dd->ipath_piobufbase & 0xffffffff)) / dd->ipath_palign;
 	kinfo->spi_pioalign = dd->ipath_palign;

 	kinfo->spi_qpair = IPATH_KD_QP;
@@ -2041,7 +2046,9 @@ static int ipath_close(struct inode *in, struct file *fp)
 	struct ipath_filedata *fd;
 	struct ipath_portdata *pd;
 	struct ipath_devdata *dd;
+	unsigned long flags;
 	unsigned port;
+	struct pid *pid;

 	ipath_cdbg(VERBOSE, "close on dev %lx, private data %p\n",
 		   (long)in->i_rdev, fp->private_data);
@@ -2074,14 +2081,13 @@ static int ipath_close(struct inode *in, struct file *fp)
 		mutex_unlock(&ipath_mutex);
 		goto bail;
 	}

+	/* early; no interrupt users after this */
+	spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
 	port = pd->port_port;
-	if (pd->port_hdrqfull) {
-		ipath_cdbg(PROC, "%s[%u] had %u rcvhdrqfull errors "
-			   "during run\n", pd->port_comm, pid_nr(pd->port_pid),
-			   pd->port_hdrqfull);
-		pd->port_hdrqfull = 0;
-	}
+	dd->ipath_pd[port] = NULL;
+	pid = pd->port_pid;
+	pd->port_pid = NULL;
+	spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);

 	if (pd->port_rcvwait_to || pd->port_piowait_to
 	    || pd->port_rcvnowait || pd->port_pionowait) {
@@ -2138,13 +2144,11 @@ static int ipath_close(struct inode *in, struct file *fp)
 		unlock_expected_tids(pd);
 		ipath_stats.sps_ports--;
 		ipath_cdbg(PROC, "%s[%u] closed port %u:%u\n",
-			   pd->port_comm, pid_nr(pd->port_pid),
+			   pd->port_comm, pid_nr(pid),
 			   dd->ipath_unit, port);
 	}

-	put_pid(pd->port_pid);
-	pd->port_pid = NULL;
-	dd->ipath_pd[pd->port_port] = NULL; /* before releasing mutex */
+	put_pid(pid);
 	mutex_unlock(&ipath_mutex);
 	ipath_free_pddata(dd, pd); /* after releasing the mutex */
......
@@ -86,7 +86,7 @@ static int create_file(const char *name, mode_t mode,
 	*dentry = NULL;
 	mutex_lock(&parent->d_inode->i_mutex);
 	*dentry = lookup_one_len(name, parent, strlen(name));
-	if (!IS_ERR(dentry))
+	if (!IS_ERR(*dentry))
 		error = ipathfs_mknod(parent->d_inode, *dentry,
 				      mode, fops, data);
 	else
......
@@ -721,6 +721,12 @@ static int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
 			 INFINIPATH_HWE_SERDESPLLFAILED);
 	}

+	dd->ibdeltainprog = 1;
+	dd->ibsymsnap =
+		ipath_read_creg32(dd, dd->ipath_cregs->cr_ibsymbolerrcnt);
+	dd->iblnkerrsnap =
+		ipath_read_creg32(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt);
+
 	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
 	config1 = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig1);
@@ -810,6 +816,36 @@ static void ipath_pe_quiet_serdes(struct ipath_devdata *dd)
 {
 	u64 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);

+	if (dd->ibsymdelta || dd->iblnkerrdelta ||
+	    dd->ibdeltainprog) {
+		u64 diagc;
+		/* enable counter writes */
+		diagc = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwdiagctrl);