Commit 53a1d0ed authored by Oleksandr Natalenko

fix merge conflict

Signed-off-by: Oleksandr Natalenko <oleksandr@natalenko.name>
parents 6f35967a 44995991
@@ -3398,7 +3398,7 @@ tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
/*
* Without TFA we must not use PMC3.
*/
if (!allow_tsx_force_abort && test_bit(3, c->idxmsk)) {
if (!allow_tsx_force_abort && test_bit(3, c->idxmsk) && idx >= 0) {
c = dyn_constraint(cpuc, c, idx);
c->idxmsk64 &= ~(1ULL << 3);
c->weight--;
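This hunk and the two that follow are one upstream fix for memory corruption in the perf TFA workaround: dyn_constraint() copies the shared constraint into cpuc->constraint_list[idx] so PMC3 can be masked out per event, but on the event-validation path idx is -1, and the unguarded call wrote before the start of the array. A minimal userspace sketch of the guarded-copy pattern (struct layout and names are illustrative, not the kernel's):

    #include <stdint.h>

    struct event_constraint { uint64_t idxmsk64; int weight; };

    /* Copy a shared constraint template into per-CPU scratch space so
     * it can be modified for one event.  idx < 0 means "validation
     * only": there is no backing slot, so the template stays as-is
     * (this is the check the fix adds before reaching the helper). */
    static struct event_constraint *
    dyn_constraint(struct event_constraint *list,
                   struct event_constraint *c, int idx)
    {
        if (idx < 0)
            return c;
        list[idx] = *c;
        return &list[idx];
    }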
@@ -4142,7 +4142,7 @@ static struct attribute *intel_pmu_caps_attrs[] = {
NULL
};
DEVICE_BOOL_ATTR(allow_tsx_force_abort, 0644, allow_tsx_force_abort);
static DEVICE_BOOL_ATTR(allow_tsx_force_abort, 0644, allow_tsx_force_abort);
static struct attribute *intel_pmu_attrs[] = {
&dev_attr_freeze_on_smi.attr,
......
@@ -1032,12 +1032,12 @@ static inline int intel_pmu_init(void)
return 0;
}
static inline int intel_cpuc_prepare(struct cpu_hw_event *cpuc, int cpu)
static inline int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
{
return 0;
}
static inline void intel_cpuc_finish(struct cpu_hw_event *cpuc)
static inline void intel_cpuc_finish(struct cpu_hw_events *cpuc)
{
}
......
@@ -250,6 +250,7 @@ void proc_coredump_connector(struct task_struct *task)
{
struct cn_msg *msg;
struct proc_event *ev;
struct task_struct *parent;
__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
if (atomic_read(&proc_event_num_listeners) < 1)
@@ -262,8 +263,14 @@ void proc_coredump_connector(struct task_struct *task)
ev->what = PROC_EVENT_COREDUMP;
ev->event_data.coredump.process_pid = task->pid;
ev->event_data.coredump.process_tgid = task->tgid;
ev->event_data.coredump.parent_pid = task->real_parent->pid;
ev->event_data.coredump.parent_tgid = task->real_parent->tgid;
rcu_read_lock();
if (pid_alive(task)) {
parent = rcu_dereference(task->real_parent);
ev->event_data.coredump.parent_pid = parent->pid;
ev->event_data.coredump.parent_tgid = parent->tgid;
}
rcu_read_unlock();
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */
@@ -276,6 +283,7 @@ void proc_exit_connector(struct task_struct *task)
{
struct cn_msg *msg;
struct proc_event *ev;
struct task_struct *parent;
__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
if (atomic_read(&proc_event_num_listeners) < 1)
@@ -290,8 +298,14 @@ void proc_exit_connector(struct task_struct *task)
ev->event_data.exit.process_tgid = task->tgid;
ev->event_data.exit.exit_code = task->exit_code;
ev->event_data.exit.exit_signal = task->exit_signal;
ev->event_data.exit.parent_pid = task->real_parent->pid;
ev->event_data.exit.parent_tgid = task->real_parent->tgid;
rcu_read_lock();
if (pid_alive(task)) {
parent = rcu_dereference(task->real_parent);
ev->event_data.exit.parent_pid = parent->pid;
ev->event_data.exit.parent_tgid = parent->tgid;
}
rcu_read_unlock();
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */
......
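Both connector hunks apply the same rule: task->real_parent may only be dereferenced under rcu_read_lock(), and only while pid_alive() confirms the task's pid links are still in place; otherwise the parent of an exiting task can already be freed. The pattern as a kernel-C sketch (the zero fallbacks are this sketch's choice; the patch leaves the fields as initialized in the event buffer):

    pid_t parent_pid = 0, parent_tgid = 0;

    rcu_read_lock();
    if (pid_alive(task)) {
        /* rcu_dereference() yields a pointer that stays valid until
         * rcu_read_unlock(); copy the fields out while it does */
        struct task_struct *parent = rcu_dereference(task->real_parent);

        parent_pid  = parent->pid;
        parent_tgid = parent->tgid;
    }
    rcu_read_unlock();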
@@ -1611,6 +1611,15 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
if (old_plane_state->fb != new_plane_state->fb)
return -EINVAL;
/*
* FIXME: Since prepare_fb and cleanup_fb are always called on
* the new_plane_state for async updates, we need to block framebuffer
* changes. This prevents use of a fb that's been cleaned up and
* double cleanups from occurring.
*/
if (old_plane_state->fb != new_plane_state->fb)
return -EINVAL;
funcs = plane->helper_private;
if (!funcs->atomic_async_update)
return -EINVAL;
......
@@ -4670,7 +4670,6 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
atomic_inc(&r10_bio->remaining);
read_bio->bi_next = NULL;
generic_make_request(read_bio);
sector_nr += nr_sectors;
sectors_done += nr_sectors;
if (sector_nr <= last)
goto read_more;
......
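The dropped line is a duplicated window advance: reshape_request() already moves sector_nr forward while assembling the read bio, so incrementing it again after submission made the reshape skip every other chunk of the device. A runnable toy model of the behaviour:

    #include <stdio.h>

    int main(void)
    {
        /* with both increments the loop reshapes 0..1023, 2048..3071,
         * ... and silently skips 1024..2047, 3072..4095, ... */
        unsigned long sector_nr = 0, last = 8191, nr_sectors = 1024;

        while (sector_nr <= last) {
            printf("reshaping %lu..%lu\n",
                   sector_nr, sector_nr + nr_sectors - 1);
            sector_nr += nr_sectors; /* done during bio assembly */
            sector_nr += nr_sectors; /* duplicate the patch removes */
        }
        return 0;
    }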
@@ -559,6 +559,9 @@ static int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port,
goto restore_link;
}
if (speed == SPEED_MAX && chip->info->ops->port_max_speed_mode)
mode = chip->info->ops->port_max_speed_mode(port);
if (chip->info->ops->port_set_pause) {
err = chip->info->ops->port_set_pause(chip, port, pause);
if (err)
@@ -3042,6 +3045,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
.port_set_speed = mv88e6341_port_set_speed,
.port_max_speed_mode = mv88e6341_port_max_speed_mode,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
@@ -3360,6 +3364,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
.port_set_speed = mv88e6390_port_set_speed,
.port_max_speed_mode = mv88e6390_port_max_speed_mode,
.port_tag_remap = mv88e6390_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
@@ -3404,6 +3409,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
.port_set_speed = mv88e6390x_port_set_speed,
.port_max_speed_mode = mv88e6390x_port_max_speed_mode,
.port_tag_remap = mv88e6390_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
@@ -3448,6 +3454,7 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
.port_set_speed = mv88e6390_port_set_speed,
.port_max_speed_mode = mv88e6390_port_max_speed_mode,
.port_tag_remap = mv88e6390_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
@@ -3541,6 +3548,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
.port_set_speed = mv88e6390_port_set_speed,
.port_max_speed_mode = mv88e6390_port_max_speed_mode,
.port_tag_remap = mv88e6390_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
@@ -3672,6 +3680,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
.port_set_speed = mv88e6341_port_set_speed,
.port_max_speed_mode = mv88e6341_port_max_speed_mode,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
@@ -3847,6 +3856,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
.port_set_speed = mv88e6390_port_set_speed,
.port_max_speed_mode = mv88e6390_port_max_speed_mode,
.port_tag_remap = mv88e6390_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
@@ -3895,6 +3905,7 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
.port_set_speed = mv88e6390x_port_set_speed,
.port_max_speed_mode = mv88e6390x_port_max_speed_mode,
.port_tag_remap = mv88e6390_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
......
@@ -377,6 +377,9 @@ struct mv88e6xxx_ops {
*/
int (*port_set_speed)(struct mv88e6xxx_chip *chip, int port, int speed);
/* What interface mode should be used for maximum speed? */
phy_interface_t (*port_max_speed_mode)(int port);
int (*port_tag_remap)(struct mv88e6xxx_chip *chip, int port);
int (*port_set_frame_mode)(struct mv88e6xxx_chip *chip, int port,
......
@@ -312,6 +312,14 @@ int mv88e6341_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
return mv88e6xxx_port_set_speed(chip, port, speed, !port, true);
}
phy_interface_t mv88e6341_port_max_speed_mode(int port)
{
if (port == 5)
return PHY_INTERFACE_MODE_2500BASEX;
return PHY_INTERFACE_MODE_NA;
}
/* Support 10, 100, 200, 1000 Mbps (e.g. 88E6352 family) */
int mv88e6352_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
{
@@ -345,6 +353,14 @@ int mv88e6390_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
return mv88e6xxx_port_set_speed(chip, port, speed, true, true);
}
phy_interface_t mv88e6390_port_max_speed_mode(int port)
{
if (port == 9 || port == 10)
return PHY_INTERFACE_MODE_2500BASEX;
return PHY_INTERFACE_MODE_NA;
}
/* Support 10, 100, 200, 1000, 2500, 10000 Mbps (e.g. 88E6190X) */
int mv88e6390x_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
{
@@ -360,6 +376,14 @@ int mv88e6390x_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
return mv88e6xxx_port_set_speed(chip, port, speed, true, true);
}
phy_interface_t mv88e6390x_port_max_speed_mode(int port)
{
if (port == 9 || port == 10)
return PHY_INTERFACE_MODE_XAUI;
return PHY_INTERFACE_MODE_NA;
}
int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
phy_interface_t mode)
{
......
@@ -285,6 +285,10 @@ int mv88e6352_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
int mv88e6390_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
int mv88e6390x_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
phy_interface_t mv88e6341_port_max_speed_mode(int port);
phy_interface_t mv88e6390_port_max_speed_mode(int port);
phy_interface_t mv88e6390x_port_max_speed_mode(int port);
int mv88e6xxx_port_set_state(struct mv88e6xxx_chip *chip, int port, u8 state);
int mv88e6xxx_port_set_vlan_map(struct mv88e6xxx_chip *chip, int port, u16 map);
......
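The new optional hook lets mv88e6xxx_port_setup_mac() translate the symbolic SPEED_MAX into a concrete interface mode per family: per the implementations above, the 6341 answers 2500BASE-X for port 5 only, the 6390 for ports 9 and 10, the 6390X answers XAUI for ports 9 and 10, and every other port gets PHY_INTERFACE_MODE_NA. A runnable userspace mock of the optional-op dispatch (types and names simplified):

    #include <stdio.h>

    typedef enum { MODE_NA, MODE_2500BASEX, MODE_XAUI } mock_phy_mode;

    struct mock_ops {
        mock_phy_mode (*port_max_speed_mode)(int port);
    };

    static mock_phy_mode mv88e6390x_mock(int port)
    {
        return (port == 9 || port == 10) ? MODE_XAUI : MODE_NA;
    }

    int main(void)
    {
        struct mock_ops has_hook = { .port_max_speed_mode = mv88e6390x_mock };
        struct mock_ops no_hook  = { 0 };
        mock_phy_mode mode = MODE_NA;

        /* a NULL hook simply means "no dedicated max-speed mode" */
        if (has_hook.port_max_speed_mode)
            mode = has_hook.port_max_speed_mode(9);   /* MODE_XAUI */
        if (no_hook.port_max_speed_mode)
            mode = no_hook.port_max_speed_mode(9);    /* skipped */

        printf("port 9 mode: %d\n", mode);
        return 0;
    }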
@@ -192,6 +192,7 @@ struct hnae3_ae_dev {
const struct hnae3_ae_ops *ops;
struct list_head node;
u32 flag;
u8 override_pci_need_reset; /* fix to stop multiple resets happening */
enum hnae3_dev_type dev_type;
enum hnae3_reset_type reset_type;
void *priv;
......
@@ -1852,7 +1852,9 @@ static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev)
/* request the reset */
if (ae_dev->ops->reset_event) {
ae_dev->ops->reset_event(pdev, NULL);
if (!ae_dev->override_pci_need_reset)
ae_dev->ops->reset_event(pdev, NULL);
return PCI_ERS_RESULT_RECOVERED;
}
@@ -2476,6 +2478,8 @@ static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
desc = &ring->desc[ring->next_to_clean];
desc_cb = &ring->desc_cb[ring->next_to_clean];
bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
/* make sure HW write desc complete */
dma_rmb();
if (!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))
return -ENXIO;
......
@@ -1259,8 +1259,10 @@ pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev)
hclge_handle_all_ras_errors(hdev);
} else {
if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
hdev->pdev->revision < 0x21)
hdev->pdev->revision < 0x21) {
ae_dev->override_pci_need_reset = 1;
return PCI_ERS_RESULT_RECOVERED;
}
}
if (status & HCLGE_RAS_REG_ROCEE_ERR_MASK) {
@@ -1269,8 +1271,11 @@ pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev)
}
if (status & HCLGE_RAS_REG_NFE_MASK ||
status & HCLGE_RAS_REG_ROCEE_ERR_MASK)
status & HCLGE_RAS_REG_ROCEE_ERR_MASK) {
ae_dev->override_pci_need_reset = 0;
return PCI_ERS_RESULT_NEED_RESET;
}
ae_dev->override_pci_need_reset = 1;
return PCI_ERS_RESULT_RECOVERED;
}
......
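The new field is a one-way handshake between the RAS handler and the AER slot_reset callback: hclge_handle_hw_ras_error() sets override_pci_need_reset to 1 whenever recovery is complete or a reset is already being handled, and clears it when the AER core really should escalate; hns3_slot_reset() then consults the flag before calling reset_event(), so the same error no longer triggers a second reset. A runnable userspace mock of the handshake (names borrowed from the patch):

    #include <stdio.h>

    struct mock_ae_dev { unsigned char override_pci_need_reset; };

    /* RAS side: decide whether slot_reset still needs to act */
    static void ras_handler(struct mock_ae_dev *d, int needs_reset)
    {
        d->override_pci_need_reset = needs_reset ? 0 : 1;
    }

    /* AER side: only request a reset if nobody handled it yet */
    static void slot_reset(struct mock_ae_dev *d)
    {
        if (!d->override_pci_need_reset)
            printf("requesting reset\n");
        else
            printf("reset suppressed, already handled\n");
    }

    int main(void)
    {
        struct mock_ae_dev dev = { 0 };

        ras_handler(&dev, 0);  /* recovered */
        slot_reset(&dev);      /* suppressed */
        ras_handler(&dev, 1);  /* needs reset */
        slot_reset(&dev);      /* requested once, by this path */
        return 0;
    }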
@@ -2645,6 +2645,8 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
if (!priv->cmd.context)
return -ENOMEM;
if (mlx4_is_mfunc(dev))
mutex_lock(&priv->cmd.slave_cmd_mutex);
down_write(&priv->cmd.switch_sem);
for (i = 0; i < priv->cmd.max_cmds; ++i) {
priv->cmd.context[i].token = i;
@@ -2670,6 +2672,8 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
down(&priv->cmd.poll_sem);
priv->cmd.use_events = 1;
up_write(&priv->cmd.switch_sem);
if (mlx4_is_mfunc(dev))
mutex_unlock(&priv->cmd.slave_cmd_mutex);
return err;
}
@@ -2682,6 +2686,8 @@ void mlx4_cmd_use_polling(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
int i;
if (mlx4_is_mfunc(dev))
mutex_lock(&priv->cmd.slave_cmd_mutex);
down_write(&priv->cmd.switch_sem);
priv->cmd.use_events = 0;
@@ -2689,9 +2695,12 @@ void mlx4_cmd_use_polling(struct mlx4_dev *dev)
down(&priv->cmd.event_sem);
kfree(priv->cmd.context);
priv->cmd.context = NULL;
up(&priv->cmd.poll_sem);
up_write(&priv->cmd.switch_sem);
if (mlx4_is_mfunc(dev))
mutex_unlock(&priv->cmd.slave_cmd_mutex);
}
struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
......
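Both switch paths now bracket the mode change with slave_cmd_mutex (taken only in multi-function/SR-IOV mode) outside switch_sem, so a slave command in flight, which runs with slave_cmd_mutex held, can never observe the event/polling machinery half torn down. Condensed to the lock bracket (kernel C; the function name is just a label for this sketch, not a function in the driver):

    static void mlx4_cmd_switch_mode_locked(struct mlx4_dev *dev)
    {
        struct mlx4_priv *priv = mlx4_priv(dev);

        if (mlx4_is_mfunc(dev))                 /* outer, SR-IOV only */
            mutex_lock(&priv->cmd.slave_cmd_mutex);
        down_write(&priv->cmd.switch_sem);      /* inner */

        /* ... flip priv->cmd.use_events, (re)build cmd context ... */

        up_write(&priv->cmd.switch_sem);
        if (mlx4_is_mfunc(dev))
            mutex_unlock(&priv->cmd.slave_cmd_mutex);
    }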
@@ -2719,13 +2719,13 @@ static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
int total_pages;
int total_mem;
int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
int tot;
sq_size = 1 << (log_sq_size + log_sq_sride + 4);
rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
total_mem = sq_size + rq_size;
total_pages =
roundup_pow_of_two((total_mem + (page_offset << 6)) >>
page_shift);
tot = (total_mem + (page_offset << 6)) >> page_shift;
total_pages = !tot ? 1 : roundup_pow_of_two(tot);
return total_pages;
}
......
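The tot guard matters because roundup_pow_of_two(0) is undefined: for a non-constant argument the kernel computes 1UL << fls_long(n - 1), which for n == 0 becomes a shift by the full word width. A QP small enough that SQ plus RQ fit inside the page offset yields tot == 0, so the fix pins the MTT size to one page. Runnable userspace model:

    #include <stdio.h>

    /* naive stand-in for the kernel helper; like the real one it is
     * only meaningful for n >= 1 */
    static unsigned long roundup_pow_of_two_ul(unsigned long n)
    {
        unsigned long p = 1;

        while (p < n)
            p <<= 1;
        return p;
    }

    int main(void)
    {
        int tot = 0;  /* tiny QP: queues fit within the first page */
        int total_pages = !tot ? 1 : (int)roundup_pow_of_two_ul(tot);

        printf("total_pages = %d\n", total_pages); /* 1, well defined */
        return 0;
    }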
@@ -585,8 +585,7 @@ static int lan743x_intr_open(struct lan743x_adapter *adapter)
if (adapter->csr.flags &
LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR |
LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET |
LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR |
LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR;
@@ -599,12 +598,6 @@ static int lan743x_intr_open(struct lan743x_adapter *adapter)
/* map TX interrupt to vector */
int_vec_map1 |= INT_VEC_MAP1_TX_VEC_(index, vector);
lan743x_csr_write(adapter, INT_VEC_MAP1, int_vec_map1);
if (flags &
LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR) {
int_vec_en_auto_clr |= INT_VEC_EN_(vector);
lan743x_csr_write(adapter, INT_VEC_EN_AUTO_CLR,
int_vec_en_auto_clr);
}
/* Remove TX interrupt from shared mask */
intr->vector_list[0].int_mask &= ~int_bit;
@@ -1902,7 +1895,17 @@ static int lan743x_rx_next_index(struct lan743x_rx *rx, int index)
return ((++index) % rx->ring_size);
}
static int lan743x_rx_allocate_ring_element(struct lan743x_rx *rx, int index)
static struct sk_buff *lan743x_rx_allocate_skb(struct lan743x_rx *rx)
{
int length = 0;
length = (LAN743X_MAX_FRAME_SIZE + ETH_HLEN + 4 + RX_HEAD_PADDING);
return __netdev_alloc_skb(rx->adapter->netdev,
length, GFP_ATOMIC | GFP_DMA);
}
static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index,
struct sk_buff *skb)
{
struct lan743x_rx_buffer_info *buffer_info;
struct lan743x_rx_descriptor *descriptor;
@@ -1911,9 +1914,7 @@ static int lan743x_rx_allocate_ring_element(struct lan743x_rx *rx, int index)
length = (LAN743X_MAX_FRAME_SIZE + ETH_HLEN + 4 + RX_HEAD_PADDING);
descriptor = &rx->ring_cpu_ptr[index];
buffer_info = &rx->buffer_info[index];
buffer_info->skb = __netdev_alloc_skb(rx->adapter->netdev,
length,
GFP_ATOMIC | GFP_DMA);
buffer_info->skb = skb;
if (!(buffer_info->skb))
return -ENOMEM;
buffer_info->dma_ptr = dma_map_single(&rx->adapter->pdev->dev,
@@ -2060,8 +2061,19 @@ static int lan743x_rx_process_packet(struct lan743x_rx *rx)
/* packet is available */
if (first_index == last_index) {
/* single buffer packet */
struct sk_buff *new_skb = NULL;
int packet_length;
new_skb = lan743x_rx_allocate_skb(rx);
if (!new_skb) {
/* failed to allocate next skb.
* Memory is very low.
* Drop this packet and reuse buffer.
*/
lan743x_rx_reuse_ring_element(rx, first_index);
goto process_extension;
}
buffer_info = &rx->buffer_info[first_index];
skb = buffer_info->skb;
descriptor = &rx->ring_cpu_ptr[first_index];
@@ -2081,7 +2093,7 @@ static int lan743x_rx_process_packet(struct lan743x_rx *rx)
skb_put(skb, packet_length - 4);
skb->protocol = eth_type_trans(skb,
rx->adapter->netdev);
lan743x_rx_allocate_ring_element(rx, first_index);
lan743x_rx_init_ring_element(rx, first_index, new_skb);
} else {
int index = first_index;
@@ -2094,26 +2106,23 @@ static int lan743x_rx_process_packet(struct lan743x_rx *rx)
if (first_index <= last_index) {
while ((index >= first_index) &&
(index <= last_index)) {
lan743x_rx_release_ring_element(rx,
index);
lan743x_rx_allocate_ring_element(rx,
index);
lan743x_rx_reuse_ring_element(rx,
index);
index = lan743x_rx_next_index(rx,
index);
}
} else {
while ((index >= first_index) ||
(index <= last_index)) {
lan743x_rx_release_ring_element(rx,
index);
lan743x_rx_allocate_ring_element(rx,
index);
lan743x_rx_reuse_ring_element(rx,
index);
index = lan743x_rx_next_index(rx,
index);
}
}
}
process_extension:
if (extension_index >= 0) {
descriptor = &rx->ring_cpu_ptr[extension_index];
buffer_info = &rx->buffer_info[extension_index];
@@ -2290,7 +2299,9 @@ static int lan743x_rx_ring_init(struct lan743x_rx *rx)
rx->last_head = 0;
for (index = 0; index < rx->ring_size; index++) {
ret = lan743x_rx_allocate_ring_element(rx, index);
struct sk_buff *new_skb = lan743x_rx_allocate_skb(rx);
ret = lan743x_rx_init_ring_element(rx, index, new_skb);
if (ret)
goto cleanup;
}
......
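The receive rework follows an allocate-before-consume discipline: lan743x_rx_allocate_skb() obtains the replacement buffer first, and only on success is the filled skb handed to the stack and the slot refilled via lan743x_rx_init_ring_element(); on failure the old buffer is re-posted with lan743x_rx_reuse_ring_element() and the frame dropped, so the ring never develops a hole under memory pressure. The single-buffer path, condensed (kernel C, a fragment of the hunks above, not a complete function):

    struct sk_buff *new_skb = lan743x_rx_allocate_skb(rx);

    if (!new_skb) {
        /* OOM: keep the old skb mapped in the ring, drop the frame */
        lan743x_rx_reuse_ring_element(rx, first_index);
        goto process_extension;
    }
    skb = rx->buffer_info[first_index].skb;  /* the filled buffer */
    /* ... dma unmap, skb_put(), eth_type_trans(), hand skb up ... */
    lan743x_rx_init_ring_element(rx, first_index, new_skb);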
@@ -458,7 +458,7 @@ static int ravb_dmac_init(struct net_device *ndev)
RCR_EFFS | RCR_ENCF | RCR_ETS0 | RCR_ESF | 0x18000000, RCR);
/* Set FIFO size */
ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00222200, TGC);
ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00112200, TGC);
/* Timestamp enable */
ravb_write(ndev, TCCR_TFEN, TCCR);
......
@@ -532,6 +532,7 @@ static void pptp_sock_destruct(struct sock *sk)
pppox_unbind_sock(sk);
}
skb_queue_purge(&sk->sk_receive_queue);
dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1));
}
static int pptp_create(struct net *net, struct socket *sock, int kern)
......
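The destructor runs once the socket is dead, so no concurrent writer of sk->sk_dst_cache can exist; the literal 1 passed to rcu_dereference_protected() documents exactly that. Without the release, every closed pptp socket leaked its cached route and kept a reference on the underlying device. The idiom, as used by the patch:

    /* socket teardown: drop the cached dst; "1" asserts that no
     * further locking proof is needed because sk is already dead */
    dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1));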
@@ -1657,6 +1657,14 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
goto drop;
}
rcu_read_lock();
if (unlikely(!(vxlan->dev->flags & IFF_UP))) {
rcu_read_unlock();
atomic_long_inc(&vxlan->dev->rx_dropped);
goto drop;
}
stats = this_cpu_ptr(vxlan->dev->tstats);
u64_stats_update_begin(&stats->syncp);
stats->rx_packets++;
@@ -1664,6 +1672,9 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
u64_stats_update_end(&stats->syncp);
gro_cells_receive(&vxlan->gro_cells, skb);
rcu_read_unlock();
return 0;
drop:
@@ -2693,6 +2704,8 @@ static void vxlan_uninit(struct net_device *dev)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
gro_cells_destroy(&vxlan->gro_cells);
vxlan_fdb_delete_default(vxlan, vxlan->cfg.vni);
free_percpu(dev->tstats);
@@ -3794,7 +3807,6 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head)
vxlan_flush(vxlan, true);
gro_cells_destroy(&vxlan->gro_cells);
list_del(&vxlan->next);
unregister_netdevice_queue(dev, head);
}
......
@@ -1750,10 +1750,12 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
if (!get_dirty_pages(inode))
goto skip_flush;
f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
/*
* Wait for end_io so that F2FS_WB_CP_DATA is counted correctly by
* f2fs_is_atomic_file().
*/
if (get_dirty_pages(inode))
f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
"Unexpected flush for atomic writes: ino=%lu, npages=%u",
inode->i_ino, get_dirty_pages(inode));
ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
@@ -1761,7 +1763,7 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
goto out;
}
skip_flush:
set_inode_flag(inode, FI_ATOMIC_FILE);
clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
......
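As I read the hunks, the skip_flush shortcut is removed entirely: even with no dirty pages, writeback submitted earlier may still be in flight, and f2fs_is_atomic_file() depends on the F2FS_WB_CP_DATA count that only end_io settles, so the wait becomes unconditional and only the warning stays tied to dirty pages. The resulting flow (kernel C, sketch):

    if (get_dirty_pages(inode))
        f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
                 "Unexpected flush for atomic writes: ino=%lu, npages=%u",
                 inode->i_ino, get_dirty_pages(inode));

    /* drain in-flight writeback so end_io has finished adjusting
     * F2FS_WB_CP_DATA before FI_ATOMIC_FILE is set below */
    ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);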
@@ -13,22 +13,36 @@ int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
struct gro_cell *cell;
int res;
if (!gcells->cells || skb_cloned(skb) || netif_elide_gro(dev))
return netif_rx(skb);
rcu_read_lock();
if (unlikely(!(dev->flags & IFF_UP)))
goto drop;
if (!gcells->cells || skb_cloned(skb) || netif_elide_gro(dev)) {
res = netif_rx(skb);
goto unlock;
}
cell = this_cpu_ptr(gcells->cells);
if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
drop:
atomic_long_inc(&dev->rx_dropped);
kfree_skb(skb);
return NET_RX_DROP;
res = NET_RX_DROP;
goto unlock;
}
__skb_queue_tail(&cell->napi_skbs, skb);
if (skb_queue_len(&cell->napi_skbs) == 1)
napi_schedule(&cell->napi);
return NET_RX_SUCCESS;
res = NET_RX_SUCCESS;
unlock:
rcu_read_unlock();
return res;
}
EXPORT_SYMBOL(gro_cells_receive);
......
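This gro_cells rework and the vxlan hunks above are two halves of one race fix: a packet can reach the RX path while the device is being dismantled, after gro_cells_destroy() has freed the per-CPU cells. Both sides therefore test dev->flags & IFF_UP inside rcu_read_lock(), which is sufficient because the destroy side runs only after an RCU grace period; vxlan additionally moves gro_cells_destroy() from dellink to uninit, i.e. after the device has been quiesced. The reader-side shape (kernel-C fragment; dev and skb as in the hunks):

    rcu_read_lock();
    if (unlikely(!(dev->flags & IFF_UP))) {
        /* device going away: its gro cells may already be gone */
        atomic_long_inc(&dev->rx_dropped);
        kfree_skb(skb);
        rcu_read_unlock();
        return NET_RX_DROP;
    }
    /* ... queue skb to this CPU's cell, schedule NAPI ... */
    rcu_read_unlock();
    return NET_RX_SUCCESS;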
@@ -94,9 +94,8 @@ static void hsr_check_announce(struct net_device *hsr_dev,
&& (old_operstate != IF_OPER_UP)) {
/* Went up */
hsr->announce_count = 0;
hsr->announce_timer.expires = jiffies +
msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
add_timer(&hsr->announce_timer);
mod_timer(&hsr->announce_timer,
jiffies + msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL));
}
if ((hsr_dev->operstate != IF_OPER_UP) && (old_operstate == IF_OPER_UP))
@@ -332,6 +331,7 @@ static void hsr_announce(struct timer_list *t)
{
struct hsr_priv *hsr;
struct hsr_port *master;
unsigned long interval;
hsr = from_timer(hsr, t, announce_timer);
@@ -343,18 +343,16 @@ static void hsr_announce(struct timer_list *t)
hsr->protVersion);
hsr->announce_count++;
hsr->announce_timer.expires = jiffies +
msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
interval = msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
} else {
send_hsr_supervision_frame(master, HSR_TLV_LIFE_CHECK,
hsr->protVersion);
hsr->announce_timer.expires = jiffies +
msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
interval = msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
}
if (is_admin_up(master->dev))
add_timer(&hsr->announce_timer);
mod_timer(&hsr->announce_timer, jiffies + interval);
rcu_read_unlock();
}
@@ -486,7 +484,7 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
res = hsr_add_port(hsr, hsr_dev, HSR_PT_MASTER);
if (res)
return res;
goto err_add_port;
res = register_netdevice(hsr_dev);
if (res)
@@ -506,6 +504,8 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
fail:
hsr_for_each_port(hsr, port)
hsr_del_port(port);
err_add_port:
hsr_del_node(&hsr->self_node_db);
return res;
}
@@ -124,6 +124,18 @@ int hsr_create_self_node(struct list_head *self_node_db,
return 0;
}
void hsr_del_node(struct list_head *self_node_db)
{
struct hsr_node *node;
rcu_read_lock();
node = list_first_or_null_rcu(self_node_db, struct hsr_node, mac_list);
rcu_read_unlock();
if (node) {
list_del_rcu(&node->mac_list);
kfree(node);
}
}
/* Allocate an hsr_node and add it to node_db. 'addr' is the node's AddressA;
* seq_out is used to initialize filtering of outgoing duplicate frames
......
@@ -16,6 +16,7 @@
struct hsr_node;
void hsr_del_node(struct list_head *self_node_db);
struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[],
u16 seq_out);
struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
......
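Two independent HSR fixes share this section. First, the announce timer is re-armed with mod_timer() instead of add_timer(): add_timer() must only ever see an inactive timer (hsr_check_announce() can race the expiry handler, and double-adding corrupts the timer wheel), while mod_timer() atomically updates the expiry whether or not the timer is pending. Second, hsr_dev_finalize() used to leak the self_node_db entry when hsr_add_port() failed; the new err_add_port label routes that path through hsr_del_node(). The timer idiom:

    /* safe in either state: a pending timer is rescheduled,
     * an inactive one is started */
    mod_timer(&hsr->announce_timer,
              jiffies + msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL));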
@@ -1024,7 +1024,7 @@ static int gue_err(struct sk_buff *skb, u32 info)
int ret;
len = sizeof(struct udphdr) + sizeof(struct guehdr);
if (!pskb_may_pull(skb, len))
if (!pskb_may_pull(skb, transport_offset + len))
return -EINVAL;
guehdr = (struct guehdr *)&udp_hdr(skb)[1];
@@ -1059,7 +1059,7 @@ static int gue_err(struct sk_buff *skb, u32 info)
optlen = guehdr->hlen << 2;
if (!pskb_may_pull(skb, len + optlen))
if (!pskb_may_pull(skb, transport_offset + len + optlen))
return -EINVAL;
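pskb_may_pull(skb, n) guarantees that the first n bytes counted from skb->data are linear; it knows nothing about where protocol headers sit. In this ICMP error path skb->data is still at the outer headers while udp_hdr(skb) points transport_offset bytes further in, so the old checks could pass even though the GUE header itself lay beyond the linear area. The fixed checks count from skb->data (sketch; transport_offset is assumed to be skb_transport_offset(skb), set up earlier in the function):

    int transport_offset = skb_transport_offset(skb);
    size_t len = sizeof(struct udphdr) + sizeof(struct guehdr);

    /* the linear guarantee must cover everything up to the last
     * byte read through udp_hdr()/guehdr, offset included */
    if (!pskb_may_pull(skb, transport_offset + len))
        return -EINVAL;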