Commit 6c3c1eb3 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Receive packet length needs to be adjusted by 2 on RX to
    accommodate the two padding bytes in the altera_tse driver.  From
    Vlastimil Setka.

 2) If an rx frame is dropped due to out of memory in the macb driver,
    we leave the receive ring descriptors in an undefined state.  From
    Punnaiah Choudary Kalluri.

 3) Some netlink subsystems erroneously signal NLM_F_MULTI.  That is
    only for dumps.  Fix from Nicolas Dichtel.

 4) Fix misuse of the raw rt->rt_pmtu value in ipv4; one must always go
    via the ipv4_mtu() helper.  From Herbert Xu.

 5) Fix null deref in bridge netfilter, and miscalculated lengths in
    jump/goto nf_tables verdicts.  From Florian Westphal.

 6) Unhash ping sockets properly.

 7) Software implementation of BPF divide did 64/32 rather than 64/64
    bit divide.  The JITs got it right.  Fix from Alexei Starovoitov.
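
   A minimal userspace illustration of that bug class (not the kernel's
   actual interpreter code): when the divisor is implicitly truncated to
   32 bits, any divisor with only high bits set collapses to a small
   value and the quotient is wildly wrong.

       #include <stdint.h>
       #include <stdio.h>

       int main(void)
       {
               uint64_t dividend = 0x200000002ULL; /* 2 * (2^32 + 1) */
               uint64_t divisor  = 0x100000001ULL; /* 2^32 + 1 */

               /* Correct 64/64 divide, as the JITs (and the fixed
                * interpreter) perform it. */
               uint64_t good = dividend / divisor;          /* = 2 */

               /* Buggy 64/32 divide: the divisor truncates to 1, so
                * the quotient is the dividend unchanged. */
               uint64_t bad = dividend / (uint32_t)divisor; /* = 0x200000002 */

               printf("64/64: %llu, 64/32: %llu\n",
                      (unsigned long long)good, (unsigned long long)bad);
               return 0;
       }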

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (30 commits)
  ipv4: Missing sk_nulls_node_init() in ping_unhash().
  net: fec: Fix RGMII-ID mode
  net/mlx4_en: Schedule napi when RX buffers allocation fails
  netxen_nic: use spin_[un]lock_bh around tx_clean_lock
  net/mlx4_core: Fix unaligned accesses
  mlx4_en: Use correct loop cursor in error path.
  cxgb4: Fix MC1 memory offset calculation
  bnx2x: Delay during kdump load
  net: Fix Kernel Panic in bonding driver debugfs file: rlb_hash_table
  net: dsa: Fix scope of eeprom-length property
  net: macb: Fix race condition in driver when Rx frame is dropped
  hv_netvsc: Fix a bug in netvsc_start_xmit()
  altera_tse: Correct rx packet length
  mlx4: Fix tx ring affinity_mask creation
  tipc: fix problem with parallel link synchronization mechanism
  tipc: remove wrong use of NLM_F_MULTI
  bridge/nl: remove wrong use of NLM_F_MULTI
  bridge/mdb: remove wrong use of NLM_F_MULTI
  net: sched: act_connmark: don't zap skb->nfct
  trivial: net: systemport: bcmsysport.h: fix 0x0x prefix
  ...
parents e412d3a3 a134f083
......@@ -4544,6 +4544,8 @@ unsigned int bond_get_num_tx_queues(void)
int bond_create(struct net *net, const char *name)
{
struct net_device *bond_dev;
struct bonding *bond;
struct alb_bond_info *bond_info;
int res;
rtnl_lock();
......@@ -4557,6 +4559,14 @@ int bond_create(struct net *net, const char *name)
return -ENOMEM;
}
/*
* Initialize rx_hashtbl_used_head to RLB_NULL_INDEX.
* It is set to 0 by default which is wrong.
*/
bond = netdev_priv(bond_dev);
bond_info = &(BOND_ALB_INFO(bond));
bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX;
dev_net_set(bond_dev, net);
bond_dev->rtnl_link_ops = &bond_link_ops;
......
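
For context on the panic this prevents: the RLB hash table's used-list is
a sentinel-terminated chain, so a head left at its default of 0 makes
readers (e.g. the rlb_hash_table debugfs file) walk entries that were
never initialized.  A rough sketch of the traversal pattern, assuming the
bond_alb field names used above:

    u32 idx = bond_info->rx_hashtbl_used_head;

    while (idx != RLB_NULL_INDEX) {
            /* with the head wrongly 0, this touches a table that may
             * never have been allocated or initialized */
            struct rlb_client_info *client = &bond_info->rx_hashtbl[idx];
            idx = client->used_next;
    }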
......@@ -391,6 +391,12 @@ static int tse_rx(struct altera_tse_private *priv, int limit)
"RCV pktstatus %08X pktlength %08X\n",
pktstatus, pktlength);
/* DMA transfer from TSE starts with 2 additional bytes for
* IP payload alignment. Status returned by get_rx_status()
* contains DMA transfer length. Packet is 2 bytes shorter.
*/
pktlength -= 2;
count++;
next_entry = (++priv->rx_cons) % priv->rx_ring_size;
......
......@@ -129,7 +129,7 @@ s32 atl1e_restart_autoneg(struct atl1e_hw *hw);
#define TWSI_CTRL_LD_SLV_ADDR_SHIFT 8
#define TWSI_CTRL_SW_LDSTART 0x800
#define TWSI_CTRL_HW_LDSTART 0x1000
#define TWSI_CTRL_SMB_SLV_ADDR_MASK 0x0x7F
#define TWSI_CTRL_SMB_SLV_ADDR_MASK 0x7F
#define TWSI_CTRL_SMB_SLV_ADDR_SHIFT 15
#define TWSI_CTRL_LD_EXIST 0x400000
#define TWSI_CTRL_READ_FREQ_SEL_MASK 0x3
......
......@@ -543,7 +543,7 @@ struct bcm_sysport_tx_counters {
u32 jbr; /* RO # of xmited jabber count*/
u32 bytes; /* RO # of xmited byte count */
u32 pok; /* RO # of xmited good pkt */
u32 uc; /* RO (0x0x4f0)# of xmited unitcast pkt */
u32 uc; /* RO (0x4f0) # of xmited unicast pkt */
};
struct bcm_sysport_mib {
......
......@@ -521,6 +521,7 @@ struct bnx2x_fp_txdata {
};
enum bnx2x_tpa_mode_t {
TPA_MODE_DISABLED,
TPA_MODE_LRO,
TPA_MODE_GRO
};
......@@ -589,7 +590,6 @@ struct bnx2x_fastpath {
/* TPA related */
struct bnx2x_agg_info *tpa_info;
u8 disable_tpa;
#ifdef BNX2X_STOP_ON_ERROR
u64 tpa_queue_used;
#endif
......@@ -1545,9 +1545,7 @@ struct bnx2x {
#define USING_MSIX_FLAG (1 << 5)
#define USING_MSI_FLAG (1 << 6)
#define DISABLE_MSI_FLAG (1 << 7)
#define TPA_ENABLE_FLAG (1 << 8)
#define NO_MCP_FLAG (1 << 9)
#define GRO_ENABLE_FLAG (1 << 10)
#define MF_FUNC_DIS (1 << 11)
#define OWN_CNIC_IRQ (1 << 12)
#define NO_ISCSI_OOO_FLAG (1 << 13)
......
......@@ -947,10 +947,10 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
u16 frag_size, pages;
#ifdef BNX2X_STOP_ON_ERROR
/* sanity check */
if (fp->disable_tpa &&
if (fp->mode == TPA_MODE_DISABLED &&
(CQE_TYPE_START(cqe_fp_type) ||
CQE_TYPE_STOP(cqe_fp_type)))
BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
CQE_TYPE(cqe_fp_type));
#endif
......@@ -1396,7 +1396,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
DP(NETIF_MSG_IFUP,
"mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
if (!fp->disable_tpa) {
if (fp->mode != TPA_MODE_DISABLED) {
/* Fill the per-aggregation pool */
for (i = 0; i < MAX_AGG_QS(bp); i++) {
struct bnx2x_agg_info *tpa_info =
......@@ -1410,7 +1410,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
j);
bnx2x_free_tpa_pool(bp, fp, i);
fp->disable_tpa = 1;
fp->mode = TPA_MODE_DISABLED;
break;
}
dma_unmap_addr_set(first_buf, mapping, 0);
......@@ -1438,7 +1438,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
ring_prod);
bnx2x_free_tpa_pool(bp, fp,
MAX_AGG_QS(bp));
fp->disable_tpa = 1;
fp->mode = TPA_MODE_DISABLED;
ring_prod = 0;
break;
}
......@@ -1560,7 +1560,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
bnx2x_free_rx_bds(fp);
if (!fp->disable_tpa)
if (fp->mode != TPA_MODE_DISABLED)
bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
}
}
......@@ -2477,19 +2477,19 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
/* set the tpa flag for each queue. The tpa flag determines the queue
* minimal size so it must be set prior to queue memory allocation
*/
fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
(bp->flags & GRO_ENABLE_FLAG &&
bnx2x_mtu_allows_gro(bp->dev->mtu)));
if (bp->flags & TPA_ENABLE_FLAG)
if (bp->dev->features & NETIF_F_LRO)
fp->mode = TPA_MODE_LRO;
else if (bp->flags & GRO_ENABLE_FLAG)
else if (bp->dev->features & NETIF_F_GRO &&
bnx2x_mtu_allows_gro(bp->dev->mtu))
fp->mode = TPA_MODE_GRO;
else
fp->mode = TPA_MODE_DISABLED;
/* We don't want TPA if it's disabled in bp
* or if this is an FCoE L2 ring.
*/
if (bp->disable_tpa || IS_FCOE_FP(fp))
fp->disable_tpa = 1;
fp->mode = TPA_MODE_DISABLED;
}
int bnx2x_load_cnic(struct bnx2x *bp)
......@@ -2610,7 +2610,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/*
* Zero fastpath structures preserving invariants like napi, which are
* allocated only once, fp index, max_cos, bp pointer.
* Also set fp->disable_tpa and txdata_ptr.
* Also set fp->mode and txdata_ptr.
*/
DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
for_each_queue(bp, i)
......@@ -3249,7 +3249,7 @@ int bnx2x_low_latency_recv(struct napi_struct *napi)
if ((bp->state == BNX2X_STATE_CLOSED) ||
(bp->state == BNX2X_STATE_ERROR) ||
(bp->flags & (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG)))
(bp->dev->features & (NETIF_F_LRO | NETIF_F_GRO)))
return LL_FLUSH_FAILED;
if (!bnx2x_fp_lock_poll(fp))
......@@ -4545,7 +4545,7 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
* In these cases we disable the queue
* Min size is different for OOO, TPA and non-TPA queues
*/
if (ring_size < (fp->disable_tpa ?
if (ring_size < (fp->mode == TPA_MODE_DISABLED ?
MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
/* release memory allocated for this queue */
bnx2x_free_fp_mem_at(bp, index);
......@@ -4834,29 +4834,15 @@ netdev_features_t bnx2x_fix_features(struct net_device *dev,
features &= ~NETIF_F_GRO;
}
/* Note: do not disable SW GRO in kernel when HW GRO is off */
if (bp->disable_tpa)
features &= ~NETIF_F_LRO;
return features;
}
int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
{
struct bnx2x *bp = netdev_priv(dev);
u32 flags = bp->flags;
u32 changes;
netdev_features_t changes = features ^ dev->features;
bool bnx2x_reload = false;
if (features & NETIF_F_LRO)
flags |= TPA_ENABLE_FLAG;
else
flags &= ~TPA_ENABLE_FLAG;
if (features & NETIF_F_GRO)
flags |= GRO_ENABLE_FLAG;
else
flags &= ~GRO_ENABLE_FLAG;
int rc;
/* VFs or non SRIOV PFs should be able to change loopback feature */
if (!pci_num_vf(bp->pdev)) {
......@@ -4873,24 +4859,23 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
}
}
changes = flags ^ bp->flags;
/* if GRO is changed while LRO is enabled, don't force a reload */
if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG))
changes &= ~GRO_ENABLE_FLAG;
if ((changes & NETIF_F_GRO) && (features & NETIF_F_LRO))
changes &= ~NETIF_F_GRO;
/* if GRO is changed while HW TPA is off, don't force a reload */
if ((changes & GRO_ENABLE_FLAG) && bp->disable_tpa)
changes &= ~GRO_ENABLE_FLAG;
if ((changes & NETIF_F_GRO) && bp->disable_tpa)
changes &= ~NETIF_F_GRO;
if (changes)
bnx2x_reload = true;
bp->flags = flags;
if (bnx2x_reload) {
if (bp->recovery_state == BNX2X_RECOVERY_DONE)
return bnx2x_reload_if_running(dev);
if (bp->recovery_state == BNX2X_RECOVERY_DONE) {
dev->features = features;
rc = bnx2x_reload_if_running(dev);
return rc ? rc : 1;
}
/* else: bnx2x_nic_load() will be called at end of recovery */
}
......
......@@ -969,7 +969,7 @@ static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
{
int i;
if (fp->disable_tpa)
if (fp->mode == TPA_MODE_DISABLED)
return;
for (i = 0; i < last; i++)
......
......@@ -3128,7 +3128,7 @@ static unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
__set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags);
}
if (!fp->disable_tpa) {
if (fp->mode != TPA_MODE_DISABLED) {
__set_bit(BNX2X_Q_FLG_TPA, &flags);
__set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
if (fp->mode == TPA_MODE_GRO)
......@@ -3176,7 +3176,7 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
u16 sge_sz = 0;
u16 tpa_agg_size = 0;
if (!fp->disable_tpa) {
if (fp->mode != TPA_MODE_DISABLED) {
pause->sge_th_lo = SGE_TH_LO(bp);
pause->sge_th_hi = SGE_TH_HI(bp);
......@@ -3304,7 +3304,7 @@ static void bnx2x_pf_init(struct bnx2x *bp)
/* This flag is relevant for E1x only.
* E2 doesn't have a TPA configuration in a function level.
*/
flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
flags |= (bp->dev->features & NETIF_F_LRO) ? FUNC_FLG_TPA : 0;
func_init.func_flgs = flags;
func_init.pf_id = BP_FUNC(bp);
......@@ -12107,11 +12107,8 @@ static int bnx2x_init_bp(struct bnx2x *bp)
/* Set TPA flags */
if (bp->disable_tpa) {
bp->flags &= ~(TPA_ENABLE_FLAG | GRO_ENABLE_FLAG);
bp->dev->hw_features &= ~NETIF_F_LRO;
bp->dev->features &= ~NETIF_F_LRO;
} else {
bp->flags |= (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG);
bp->dev->features |= NETIF_F_LRO;
}
if (CHIP_IS_E1(bp))
......@@ -13371,6 +13368,12 @@ static int bnx2x_init_one(struct pci_dev *pdev,
bool is_vf;
int cnic_cnt;
/* Management FW 'remembers' living interfaces. Allow it some time
* to forget previously living interfaces, allowing a proper re-load.
*/
if (is_kdump_kernel())
msleep(5000);
/* An estimated maximum supported CoS number according to the chip
* version.
* We will try to roughly estimate the maximum number of CoSes this chip
......
......@@ -594,7 +594,7 @@ int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SETUP_Q, sizeof(*req));
/* select tpa mode to request */
if (!fp->disable_tpa) {
if (fp->mode != TPA_MODE_DISABLED) {
flags |= VFPF_QUEUE_FLG_TPA;
flags |= VFPF_QUEUE_FLG_TPA_IPV6;
if (fp->mode == TPA_MODE_GRO)
......
......@@ -707,6 +707,9 @@ static void gem_rx_refill(struct macb *bp)
/* properly align Ethernet header */
skb_reserve(skb, NET_IP_ALIGN);
} else {
bp->rx_ring[entry].addr &= ~MACB_BIT(RX_USED);
bp->rx_ring[entry].ctrl = 0;
}
}
......
......@@ -492,7 +492,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
memoffset = (mtype * (edc_size * 1024 * 1024));
else {
mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
MA_EXT_MEMORY1_BAR_A));
MA_EXT_MEMORY0_BAR_A));
memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
}
......
......@@ -4846,7 +4846,8 @@ static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
}
static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct net_device *dev, u32 filter_mask)
struct net_device *dev, u32 filter_mask,
int nlflags)
{
struct be_adapter *adapter = netdev_priv(dev);
int status = 0;
......@@ -4868,7 +4869,7 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
hsw_mode == PORT_FWD_TYPE_VEPA ?
BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
0, 0);
0, 0, nlflags);
}
#ifdef CONFIG_BE2NET_VXLAN
......
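
The signature change above is the mechanical part of the NLM_F_MULTI fix
(item 3 in the pull message): the flag may only appear on messages that
are part of a dump, so the rtnetlink core now decides it and drivers
merely forward nlflags into ndo_dflt_bridge_getlink(), which hands it to
nlmsg_put().  A hedged sketch of the receiving end:

    /* inside ndo_dflt_bridge_getlink(); nlflags is NLM_F_MULTI on the
     * dump path and 0 for a single RTM_GETLINK answer */
    nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK,
                    sizeof(struct ifinfomsg), nlflags);
    if (nlh == NULL)
            return -EMSGSIZE;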
......@@ -988,7 +988,10 @@ fec_restart(struct net_device *ndev)
rcntl |= 0x40000000 | 0x00000020;
/* RGMII, RMII or MII */
if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII)
if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII ||
fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
rcntl |= (1 << 6);
else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
rcntl |= (1 << 8);
......
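
All four RGMII variants (plain, -ID, -RXID, -TXID differ only in where
the clock delays are inserted) must program the MAC's RGMII mode bit.
Later kernels wrap this test in phy_interface_mode_is_rgmii(); an
equivalent open-coded sketch, relying on the four RGMII values being
contiguous in enum phy_interface_t:

    static inline bool fec_phy_is_rgmii(phy_interface_t iface)
    {
            return iface >= PHY_INTERFACE_MODE_RGMII &&
                   iface <= PHY_INTERFACE_MODE_RGMII_TXID;
    }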
......@@ -8053,10 +8053,10 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
#ifdef HAVE_BRIDGE_FILTER
static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct net_device *dev,
u32 __always_unused filter_mask)
u32 __always_unused filter_mask, int nlflags)
#else
static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct net_device *dev)
struct net_device *dev, int nlflags)
#endif /* HAVE_BRIDGE_FILTER */
{
struct i40e_netdev_priv *np = netdev_priv(dev);
......@@ -8078,7 +8078,8 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
if (!veb)
return 0;
return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode);
return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
nlflags);
}
#endif /* HAVE_BRIDGE_ATTRIBS */
......
......@@ -8044,7 +8044,7 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct net_device *dev,
u32 filter_mask)
u32 filter_mask, int nlflags)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
......@@ -8052,7 +8052,7 @@ static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
return 0;
return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
adapter->bridge_mode, 0, 0);
adapter->bridge_mode, 0, 0, nlflags);
}
static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
......
......@@ -1467,6 +1467,7 @@ static void mlx4_en_service_task(struct work_struct *work)
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
mlx4_en_ptp_overflow_check(mdev);
mlx4_en_recover_from_oom(priv);
queue_delayed_work(mdev->workqueue, &priv->service_task,
SERVICE_TASK_DELAY);
}
......@@ -1721,7 +1722,7 @@ int mlx4_en_start_port(struct net_device *dev)
cq_err:
while (rx_index--) {
mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
mlx4_en_free_affinity_hint(priv, i);
mlx4_en_free_affinity_hint(priv, rx_index);
}
for (i = 0; i < priv->rx_ring_num; i++)
mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
......
......@@ -244,6 +244,12 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc, gfp);
}
static inline bool mlx4_en_is_ring_empty(struct mlx4_en_rx_ring *ring)
{
BUG_ON((u32)(ring->prod - ring->cons) > ring->actual_size);
return ring->prod == ring->cons;
}
static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
{
*ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
......@@ -315,8 +321,7 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
ring->cons, ring->prod);
/* Unmap and free Rx buffers */
BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size);
while (ring->cons != ring->prod) {
while (!mlx4_en_is_ring_empty(ring)) {
index = ring->cons & ring->size_mask;
en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
mlx4_en_free_rx_desc(priv, ring, index);
......@@ -491,6 +496,23 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
return err;
}
/* We recover from out of memory by scheduling our napi poll
* function (mlx4_en_process_cq), which tries to allocate
* all missing RX buffers (call to mlx4_en_refill_rx_buffers).
*/
void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
{
int ring;
if (!priv->port_up)
return;
for (ring = 0; ring < priv->rx_ring_num; ring++) {
if (mlx4_en_is_ring_empty(priv->rx_ring[ring]))
napi_reschedule(&priv->rx_cq[ring]->napi);
}
}
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring **pring,
u32 size, u16 stride)
......
......@@ -143,8 +143,10 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type;
ring->queue_index = queue_index;
if (queue_index < priv->num_tx_rings_p_up && cpu_online(queue_index))
cpumask_set_cpu(queue_index, &ring->affinity_mask);
if (queue_index < priv->num_tx_rings_p_up)
cpumask_set_cpu_local_first(queue_index,
priv->mdev->dev->numa_node,
&ring->affinity_mask);
*pring = ring;
return 0;
......@@ -213,7 +215,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
&ring->qp, &ring->qp_state);
if (!user_prio && cpu_online(ring->queue_index))
if (!cpumask_empty(&ring->affinity_mask))
netif_set_xps_queue(priv->dev, &ring->affinity_mask,
ring->queue_index);
......
......@@ -56,11 +56,13 @@ MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: on)");
#define MLX4_GET(dest, source, offset) \
do { \
void *__p = (char *) (source) + (offset); \
u64 val; \
switch (sizeof (dest)) { \
case 1: (dest) = *(u8 *) __p; break; \
case 2: (dest) = be16_to_cpup(__p); break; \
case 4: (dest) = be32_to_cpup(__p); break; \
case 8: (dest) = be64_to_cpup(__p); break; \
case 8: val = get_unaligned((u64 *)__p); \
(dest) = be64_to_cpu(val); break; \
default: __buggy_use_of_MLX4_GET(); \
} \
} while (0)
......@@ -1605,9 +1607,17 @@ static void get_board_id(void *vsd, char *board_id)
* swaps each 4-byte word before passing it back to
* us. Therefore we need to swab it before printing.
*/
for (i = 0; i < 4; ++i)
((u32 *) board_id)[i] =
swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4));
u32 *bid_u32 = (u32 *)board_id;
for (i = 0; i < 4; ++i) {
u32 *addr;
u32 val;
addr = (u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4);
val = get_unaligned(addr);
val = swab32(val);
put_unaligned(val, &bid_u32[i]);
}
}
}
......
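
Both hunks above follow the same recipe for the unaligned-access fix:
never dereference a possibly unaligned pointer directly; go through
get_unaligned()/put_unaligned() so architectures that fault on unaligned
loads (some ARM and SPARC configurations, for example) fall back to safe
byte access.  A minimal sketch of the read side (the kernel also offers
get_unaligned_be64(), which combines both steps):

    #include <asm/unaligned.h>

    static u64 read_be64_field(const void *buf, unsigned int offset)
    {
            /* byte-granular load first, then endianness conversion */
            return be64_to_cpu(get_unaligned((const __be64 *)(buf + offset)));
    }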
......@@ -774,6 +774,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring);
void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev);
void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv);
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring **pring,
u32 size, u16 stride, int node);
......
......@@ -135,7 +135,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter)
int i, j;
struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
spin_lock(&adapter->tx_clean_lock);
spin_lock_bh(&adapter->tx_clean_lock);
cmd_buf = tx_ring->cmd_buf_arr;
for (i = 0; i < tx_ring->num_desc; i++) {
buffrag = cmd_buf->frag_array;
......@@ -159,7 +159,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter)
}
cmd_buf++;
}
spin_unlock(&adapter->tx_clean_lock);
spin_unlock_bh(&adapter->tx_clean_lock);
}
void netxen_free_sw_resources(struct netxen_adapter *adapter)
......
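
The _bh variants matter here because tx_clean_lock is also taken from
softirq context (the TX-completion path); a plain spin_lock in process
context can then deadlock when a softirq fires on the same CPU and spins
on the lock that CPU already holds.  The pattern, sketched:

    /* process context: disable bottom halves for the critical section */
    spin_lock_bh(&adapter->tx_clean_lock);
    /* ... release pending TX buffers ... */
    spin_unlock_bh(&adapter->tx_clean_lock);

    /* softirq context may keep using the plain variant, since bottom
     * halves are already disabled there */
    spin_lock(&adapter->tx_clean_lock);
    /* ... reclaim completed TX descriptors ... */
    spin_unlock(&adapter->tx_clean_lock);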
......@@ -4176,14 +4176,15 @@ static int rocker_port_bridge_setlink(struct net_device *dev,
static int rocker_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct net_device *dev,
u32 filter_mask)
u32 filter_mask, int nlflags)
{
struct rocker_port *rocker_port = netdev_priv(dev);
u16 mode = BRIDGE_MODE_UNDEF;
u32 mask = BR_LEARNING | BR_LEARNING_SYNC;
return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode,
rocker_port->brport_flags, mask);
rocker_port->brport_flags, mask,
nlflags);
}
static int rocker_port_get_phys_port_name(struct net_device *dev,
......
......@@ -1765,7 +1765,9 @@ static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev,
ALE_PORT_STATE,
ALE_PORT_STATE_FORWARD);
if (ndev && slave->open)
if (ndev && slave->open &&
slave->link_interface != SGMII_LINK_MAC_PHY &&
slave->link_interface != XGMII_LINK_MAC_PHY)
netif_carrier_on(ndev);
} else {
writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
......@@ -1773,7 +1775,9 @@ static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev,
cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
ALE_PORT_STATE,
ALE_PORT_STATE_DISABLE);
if (ndev)
if (ndev &&
slave->link_interface != SGMII_LINK_MAC_PHY &&
slave->link_interface != XGMII_LINK_MAC_PHY)
netif_carrier_off(ndev);
}
......
......@@ -128,7 +128,6 @@ struct ndis_tcp_ip_checksum_info;
struct hv_netvsc_packet {
/* Bookkeeping stuff */
u32 status;
bool part_of_skb;
bool is_data_pkt;
bool xmit_more; /* from skb */
......@@ -612,6 +611,15 @@ struct multi_send_data {
u32 count; /* counter of batched packets */
};
/* The context of the netvsc device */
struct net_device_context {
/* point back to our device context */
struct hv_device *device_ctx;
struct delayed_work dwork;
struct work_struct work;
u32 msg_enable; /* debug level */
};
/* Per netvsc device */
struct netvsc_device {
struct hv_device *dev;
......@@ -667,6 +675,9 @@ struct netvsc_device {
struct multi_send_data msd[NR_CPUS];
u32 max_pkt; /* max number of pkt in one send, e.g. 8 */
u32 pkt_align; /* alignment bytes, e.g. 8 */
/* The net device context */
struct net_device_context *nd_ctx;
};
/* NdisInitialize message */
......
......@@ -889,11 +889,6 @@ int netvsc_send(struct hv_device *device,
} else {
packet->page_buf_cnt = 0;
packet->total_data_buflen += msd_len;
if (!packet->part_of_skb) {
skb = (struct sk_buff *)(unsigned long)packet->
send_completion_tid;
packet->send_completion_tid = 0;
}
}
if (msdp->pkt)
......@@ -1197,6 +1192,9 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
*/
ndev = net_device->ndev;
/* Add netvsc_device context to netvsc_device */
net_device->nd_ctx = netdev_priv(ndev);
/* Initialize the NetVSC channel extension */
init_completion(&net_device->channel_init_wait);
......
......@@ -40,18 +40,21 @@
#include "hyperv_net.h"
struct net_device_context {
/* point back to our device context */
struct hv_device *device_ctx;
struct delayed_work dwork;
struct work_struct work;
};
#define RING_SIZE_MIN 64
static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_IFUP |
NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
NETIF_MSG_TX_ERR;
static int debug = -1;
module_param(debug, int, S_IRUGO);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
static void do_set_multicast(struct work_struct *w)
{
struct net_device_context *ndevctx =
......@@ -235,9 +238,6 @@ void netvsc_xmit_completion(void *context)
struct sk_buff *skb = (struct sk_buff *)
(unsigned long)packet->send_completion_tid;
if (!packet->part_of_skb)
kfree(packet);
if (skb)
dev_kfree_skb_any(skb);
}
......@@ -389,7 +389,6 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
u32 net_trans_info;
u32 hash;