Commit 272911b8 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Fix mvneta/bm dependencies, from Arnd Bergmann.

 2) RX completion hw bug workaround in bnxt_en, from Michael Chan.

 3) Kernel pointer leak in nf_conntrack, from Linus.

 4) Hoplimit route attribute limits not enforced properly, from Paolo
    Abeni.

 5) qlcnic driver NULL deref fix from Dan Carpenter.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
  arm64: bpf: jit JMP_JSET_{X,K}
  net/route: enforce hoplimit max value
  nf_conntrack: avoid kernel pointer value leak in slab name
  drivers: net: xgene: fix register offset
  drivers: net: xgene: fix statistics counters race condition
  drivers: net: xgene: fix ununiform latency across queues
  drivers: net: xgene: fix sharing of irqs
  drivers: net: xgene: fix IPv4 forward crash
  xen-netback: fix extra_info handling in xenvif_tx_err()
  net: mvneta: bm: fix dependencies again
  bnxt_en: Add workaround to detect bad opaque in rx completion (part 2)
  bnxt_en: Add workaround to detect bad opaque in rx completion (part 1)
  qlcnic: potential NULL dereference in qlcnic_83xx_get_minidump_template()
parents 6ba5b85f 98397fc5
@@ -476,6 +476,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
     case BPF_JGE:
         jmp_cond = A64_COND_CS;
         break;
+    case BPF_JSET:
     case BPF_JNE:
         jmp_cond = A64_COND_NE;
         break;
...
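An editor's note on why JSET can share JNE's condition code, as an illustrative C sketch (not kernel code): BPF_JSET branches when the AND of its operands is non-zero, and the arm64 JIT tests that with TST, a flag-setting AND, so "any common bit set" surfaces as the same NE condition that BPF_JNE already selects.

#include <stdbool.h>
#include <stdint.h>

/* The branch condition each BPF jump encodes.  On arm64, CMP feeds
 * B.NE for BPF_JNE, and TST (flag-setting AND) feeds B.NE for
 * BPF_JSET -- hence the shared jmp_cond = A64_COND_NE above. */
static bool bpf_jne_taken(uint64_t dst, uint64_t src)
{
    return dst != src;          /* CMP + B.NE */
}

static bool bpf_jset_taken(uint64_t dst, uint64_t src)
{
    return (dst & src) != 0;    /* TST + B.NE */
}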
@@ -43,6 +43,7 @@ static void xgene_cle_idt_to_hw(u32 dstqid, u32 fpsel,
 static void xgene_cle_dbptr_to_hw(struct xgene_enet_pdata *pdata,
                                   struct xgene_cle_dbptr *dbptr, u32 *buf)
 {
+    buf[0] = SET_VAL(CLE_DROP, dbptr->drop);

     buf[4] = SET_VAL(CLE_FPSEL, dbptr->fpsel) |
              SET_VAL(CLE_DSTQIDL, dbptr->dstqid);
@@ -412,7 +413,7 @@ static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata)
             .branch = {
                 {
                     /* IPV4 */
-                    .valid = 0,
+                    .valid = 1,
                     .next_packet_pointer = 22,
                     .jump_bw = JMP_FW,
                     .jump_rel = JMP_ABS,
@@ -420,7 +421,7 @@ static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata)
                     .next_node = PKT_PROT_NODE,
                     .next_branch = 0,
                     .data = 0x8,
-                    .mask = 0xffff
+                    .mask = 0x0
                 },
                 {
                     .valid = 0,
@@ -456,7 +457,7 @@ static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata)
                     .next_node = RSS_IPV4_TCP_NODE,
                     .next_branch = 0,
                     .data = 0x0600,
-                    .mask = 0xffff
+                    .mask = 0x00ff
                 },
                 {
                     /* UDP */
@@ -468,7 +469,7 @@ static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata)
                     .next_node = RSS_IPV4_UDP_NODE,
                     .next_branch = 0,
                     .data = 0x1100,
-                    .mask = 0xffff
+                    .mask = 0x00ff
                 },
                 {
                     .valid = 0,
@@ -642,7 +643,7 @@ static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata)
         {
             /* TCP DST Port */
             .valid = 0,
-            .next_packet_pointer = 256,
+            .next_packet_pointer = 258,
             .jump_bw = JMP_FW,
             .jump_rel = JMP_ABS,
             .operation = EQT,
...
@@ -83,6 +83,8 @@
 #define CLE_TYPE_POS        0
 #define CLE_TYPE_LEN        2
+#define CLE_DROP_POS        28
+#define CLE_DROP_LEN        1
 #define CLE_DSTQIDL_POS     25
 #define CLE_DSTQIDL_LEN     7
 #define CLE_DSTQIDH_POS     0
...
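The new CLE_DROP_POS/CLE_DROP_LEN pair describes the 1-bit drop flag that the buf[0] assignment above packs. SET_VAL's exact definition lives in the driver's headers; the helper below is our illustration of the usual POS/LEN shift-and-mask idiom, not the kernel macro.

#include <stdint.h>

#define CLE_DROP_POS 28
#define CLE_DROP_LEN 1

/* illustrative equivalent of a POS/LEN field-packing macro */
static inline uint32_t set_field(uint32_t val, int pos, int len)
{
    return (val & ((1U << len) - 1)) << pos;
}

/* e.g. packing the classifier's drop decision into dbptr word 0 */
static inline uint32_t pack_drop(uint32_t drop)
{
    return set_field(drop, CLE_DROP_POS, CLE_DROP_LEN);
}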
@@ -219,27 +219,30 @@ void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
                             struct xgene_enet_pdata *pdata,
                             enum xgene_enet_err_code status)
 {
-    struct rtnl_link_stats64 *stats = &pdata->stats;
-
     switch (status) {
     case INGRESS_CRC:
-        stats->rx_crc_errors++;
+        ring->rx_crc_errors++;
+        ring->rx_dropped++;
         break;
     case INGRESS_CHECKSUM:
     case INGRESS_CHECKSUM_COMPUTE:
-        stats->rx_errors++;
+        ring->rx_errors++;
+        ring->rx_dropped++;
         break;
     case INGRESS_TRUNC_FRAME:
-        stats->rx_frame_errors++;
+        ring->rx_frame_errors++;
+        ring->rx_dropped++;
         break;
     case INGRESS_PKT_LEN:
-        stats->rx_length_errors++;
+        ring->rx_length_errors++;
+        ring->rx_dropped++;
         break;
     case INGRESS_PKT_UNDER:
-        stats->rx_frame_errors++;
+        ring->rx_frame_errors++;
+        ring->rx_dropped++;
         break;
     case INGRESS_FIFO_OVERRUN:
-        stats->rx_fifo_errors++;
+        ring->rx_fifo_errors++;
         break;
     default:
         break;
...
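This hunk is the core of the statistics race fix: every counter moves from the shared pdata->stats into the ring that observed the event, so the fast path needs no locks or atomics. A minimal sketch of the scheme with our own types (the driver's per-ring fields appear in the later hunks): each ring has exactly one writer, its NAPI poll, and totals are summed only at read time.

#include <stdint.h>

struct ring_stats {
    uint64_t rx_packets;    /* written only from this ring's NAPI poll */
};

/* fast path: a plain increment is safe with a single writer per ring */
static inline void ring_rx_inc(struct ring_stats *rs)
{
    rs->rx_packets++;
}

/* slow path (ndo_get_stats64): aggregate across all rings at read time */
static uint64_t total_rx_packets(const struct ring_stats *rings, int n)
{
    uint64_t sum = 0;

    for (int i = 0; i < n; i++)
        sum += rings[i].rx_packets;
    return sum;
}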
@@ -86,7 +86,7 @@ enum xgene_enet_rm {
 #define RINGADDRL_POS       5
 #define RINGADDRL_LEN       27
 #define RINGADDRH_POS       0
-#define RINGADDRH_LEN       6
+#define RINGADDRH_LEN       7
 #define RINGSIZE_POS        23
 #define RINGSIZE_LEN        3
 #define RINGTYPE_POS        19
@@ -94,9 +94,9 @@ enum xgene_enet_rm {
 #define RINGMODE_POS        20
 #define RINGMODE_LEN        3
 #define RECOMTIMEOUTL_POS   28
-#define RECOMTIMEOUTL_LEN   3
+#define RECOMTIMEOUTL_LEN   4
 #define RECOMTIMEOUTH_POS   0
-#define RECOMTIMEOUTH_LEN   2
+#define RECOMTIMEOUTH_LEN   3
 #define NUMMSGSINQ_POS      1
 #define NUMMSGSINQ_LEN      16
 #define ACCEPTLERR          BIT(19)
@@ -201,6 +201,8 @@ enum xgene_enet_rm {
 #define USERINFO_LEN        32
 #define FPQNUM_POS          32
 #define FPQNUM_LEN          12
+#define ELERR_POS           46
+#define ELERR_LEN           2
 #define NV_POS              50
 #define NV_LEN              1
 #define LL_POS              51
...
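ELERR describes a 2-bit extended error code at bits 47:46 of rx descriptor word m0; the rx-frame hunk below combines it with the existing LERR field when classifying errors. As with SET_VAL earlier, the helper here is our sketch of the POS/LEN extraction idiom, not the driver's GET_VAL macro.

#include <stdint.h>

#define ELERR_POS 46
#define ELERR_LEN 2

/* illustrative equivalent of a POS/LEN field-extraction macro */
static inline uint64_t get_field(uint64_t desc, int pos, int len)
{
    return (desc >> pos) & ((1ULL << len) - 1);
}

/* e.g. pulling the extended error code out of descriptor word m0 */
static inline uint64_t get_elerr(uint64_t m0)
{
    return get_field(m0, ELERR_POS, ELERR_LEN);
}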
@@ -443,8 +443,8 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
     skb_tx_timestamp(skb);
-    pdata->stats.tx_packets++;
-    pdata->stats.tx_bytes += skb->len;
+    tx_ring->tx_packets++;
+    tx_ring->tx_bytes += skb->len;

     pdata->ring_ops->wr_cmd(tx_ring, count);
     return NETDEV_TX_OK;
@@ -483,12 +483,12 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
     skb = buf_pool->rx_skb[skb_index];

     /* checking for error */
-    status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
+    status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) ||
+              GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
     if (unlikely(status > 2)) {
         dev_kfree_skb_any(skb);
         xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev),
                                status);
-        pdata->stats.rx_dropped++;
         ret = -EIO;
         goto out;
     }
@@ -506,8 +506,8 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
         xgene_enet_skip_csum(skb);
     }

-    pdata->stats.rx_packets++;
-    pdata->stats.rx_bytes += datalen;
+    rx_ring->rx_packets++;
+    rx_ring->rx_bytes += datalen;
     napi_gro_receive(&rx_ring->napi, skb);
 out:
     if (--rx_ring->nbufpool == 0) {
@@ -630,7 +630,7 @@ static int xgene_enet_register_irq(struct net_device *ndev)
         ring = pdata->rx_ring[i];
         irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
         ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
-                               IRQF_SHARED, ring->irq_name, ring);
+                               0, ring->irq_name, ring);
         if (ret) {
             netdev_err(ndev, "Failed to request irq %s\n",
                        ring->irq_name);
@@ -641,7 +641,7 @@ static int xgene_enet_register_irq(struct net_device *ndev)
         ring = pdata->tx_ring[i]->cp_ring;
         irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
         ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
-                               IRQF_SHARED, ring->irq_name, ring);
+                               0, ring->irq_name, ring);
         if (ret) {
             netdev_err(ndev, "Failed to request irq %s\n",
                        ring->irq_name);
@@ -1114,12 +1114,31 @@ static struct rtnl_link_stats64 *xgene_enet_get_stats64(
 {
     struct xgene_enet_pdata *pdata = netdev_priv(ndev);
     struct rtnl_link_stats64 *stats = &pdata->stats;
+    struct xgene_enet_desc_ring *ring;
+    int i;

-    stats->rx_errors += stats->rx_length_errors +
-                        stats->rx_crc_errors +
-                        stats->rx_frame_errors +
-                        stats->rx_fifo_errors;
-    memcpy(storage, &pdata->stats, sizeof(struct rtnl_link_stats64));
+    memset(stats, 0, sizeof(struct rtnl_link_stats64));
+    for (i = 0; i < pdata->txq_cnt; i++) {
+        ring = pdata->tx_ring[i];
+        if (ring) {
+            stats->tx_packets += ring->tx_packets;
+            stats->tx_bytes += ring->tx_bytes;
+        }
+    }
+
+    for (i = 0; i < pdata->rxq_cnt; i++) {
+        ring = pdata->rx_ring[i];
+        if (ring) {
+            stats->rx_packets += ring->rx_packets;
+            stats->rx_bytes += ring->rx_bytes;
+            stats->rx_errors += ring->rx_length_errors +
+                ring->rx_crc_errors +
+                ring->rx_frame_errors +
+                ring->rx_fifo_errors;
+            stats->rx_dropped += ring->rx_dropped;
+        }
+    }
+    memcpy(storage, stats, sizeof(struct rtnl_link_stats64));

     return storage;
 }
@@ -1234,6 +1253,13 @@ static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
     for (i = 0; i < max_irqs; i++) {
         ret = platform_get_irq(pdev, i);
         if (ret <= 0) {
+            if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
+                max_irqs = i;
+                pdata->rxq_cnt = max_irqs / 2;
+                pdata->txq_cnt = max_irqs / 2;
+                pdata->cq_cnt = max_irqs / 2;
+                break;
+            }
             dev_err(dev, "Unable to get ENET IRQ\n");
             ret = ret ? : -ENXIO;
             return ret;
@@ -1437,19 +1463,28 @@ static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
         pdata->port_ops = &xgene_xgport_ops;
         pdata->cle_ops = &xgene_cle3in_ops;
         pdata->rm = RM0;
-        pdata->rxq_cnt = XGENE_NUM_RX_RING;
-        pdata->txq_cnt = XGENE_NUM_TX_RING;
-        pdata->cq_cnt = XGENE_NUM_TXC_RING;
+        if (!pdata->rxq_cnt) {
+            pdata->rxq_cnt = XGENE_NUM_RX_RING;
+            pdata->txq_cnt = XGENE_NUM_TX_RING;
+            pdata->cq_cnt = XGENE_NUM_TXC_RING;
+        }
         break;
     }

     if (pdata->enet_id == XGENE_ENET1) {
         switch (pdata->port_id) {
         case 0:
-            pdata->cpu_bufnum = START_CPU_BUFNUM_0;
-            pdata->eth_bufnum = START_ETH_BUFNUM_0;
-            pdata->bp_bufnum = START_BP_BUFNUM_0;
-            pdata->ring_num = START_RING_NUM_0;
+            if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
+                pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
+                pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
+                pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
+                pdata->ring_num = START_RING_NUM_0;
+            } else {
+                pdata->cpu_bufnum = START_CPU_BUFNUM_0;
+                pdata->eth_bufnum = START_ETH_BUFNUM_0;
+                pdata->bp_bufnum = START_BP_BUFNUM_0;
+                pdata->ring_num = START_RING_NUM_0;
+            }
             break;
         case 1:
             if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
...
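On the two request_irq hunks: dropping IRQF_SHARED is the fix because each ring owns a dedicated interrupt line and the driver parks that line while NAPI runs (note IRQ_DISABLE_UNLAZY being set just before each request). The handler below is our sketch of that common NAPI idiom, with an illustrative demo_ring type standing in for the driver's ring structure; it shows the assumption the flags change encodes, namely that nobody else sits on a line the handler disables.

#include <linux/interrupt.h>
#include <linux/netdevice.h>

/* illustrative per-ring container, not the driver's structure */
struct demo_ring {
    struct napi_struct napi;
};

/* With flags = 0 the line is exclusively ours, so the handler may
 * disable it and let the NAPI poll re-enable it when done.  Under
 * IRQF_SHARED, disable_irq_nosync() would also starve any other
 * device sharing the line. */
static irqreturn_t demo_ring_irq(int irq, void *data)
{
    struct demo_ring *ring = data;

    if (napi_schedule_prep(&ring->napi)) {
        disable_irq_nosync(irq);
        __napi_schedule(&ring->napi);
    }
    return IRQ_HANDLED;
}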
@@ -49,10 +49,10 @@
 #define XGENE_ENET_MSS              1448
 #define XGENE_MIN_ENET_FRAME_SIZE   60

-#define XGENE_MAX_ENET_IRQ  8
-#define XGENE_NUM_RX_RING   4
-#define XGENE_NUM_TX_RING   4
-#define XGENE_NUM_TXC_RING  4
+#define XGENE_MAX_ENET_IRQ  16
+#define XGENE_NUM_RX_RING   8
+#define XGENE_NUM_TX_RING   8
+#define XGENE_NUM_TXC_RING  8

 #define START_CPU_BUFNUM_0  0
 #define START_ETH_BUFNUM_0  2
@@ -121,6 +121,16 @@ struct xgene_enet_desc_ring {
         struct xgene_enet_raw_desc16 *raw_desc16;
     };
     __le64 *exp_bufs;
+    u64 tx_packets;
+    u64 tx_bytes;
+    u64 rx_packets;
+    u64 rx_bytes;
+    u64 rx_dropped;
+    u64 rx_errors;
+    u64 rx_length_errors;
+    u64 rx_crc_errors;
+    u64 rx_frame_errors;
+    u64 rx_fifo_errors;
 };

 struct xgene_mac_ops {
...
@@ -33,7 +33,7 @@
 #define LINK_STATUS                 BIT(2)
 #define LINK_UP                     BIT(15)
 #define MPA_IDLE_WITH_QMI_EMPTY     BIT(12)
-#define SG_RX_DV_GATE_REG_0_ADDR    0x0dfc
+#define SG_RX_DV_GATE_REG_0_ADDR    0x05fc

 extern const struct xgene_mac_ops xgene_sgmac_ops;
 extern const struct xgene_port_ops xgene_sgport_ops;
...
@@ -813,6 +813,46 @@ static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
     return skb;
 }

+static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi,
+                           u32 *raw_cons, void *cmp)
+{
+    struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+    struct rx_cmp *rxcmp = cmp;
+    u32 tmp_raw_cons = *raw_cons;
+    u8 cmp_type, agg_bufs = 0;
+
+    cmp_type = RX_CMP_TYPE(rxcmp);
+    if (cmp_type == CMP_TYPE_RX_L2_CMP) {
+        agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
+                    RX_CMP_AGG_BUFS) >>
+                   RX_CMP_AGG_BUFS_SHIFT;
+    } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
+        struct rx_tpa_end_cmp *tpa_end = cmp;
+
+        agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
+                    RX_TPA_END_CMP_AGG_BUFS) >>
+                   RX_TPA_END_CMP_AGG_BUFS_SHIFT;
+    }
+
+    if (agg_bufs) {
+        if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
+            return -EBUSY;
+    }
+    *raw_cons = tmp_raw_cons;
+    return 0;
+}
+
+static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
+{
+    if (!rxr->bnapi->in_reset) {
+        rxr->bnapi->in_reset = true;
+        set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
+        schedule_work(&bp->sp_task);
+    }
+    rxr->rx_next_cons = 0xffff;
+}
+
 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
                            struct rx_tpa_start_cmp *tpa_start,
                            struct rx_tpa_start_cmp_ext *tpa_start1)
@@ -830,6 +870,11 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
     prod_rx_buf = &rxr->rx_buf_ring[prod];
     tpa_info = &rxr->rx_tpa[agg_id];

+    if (unlikely(cons != rxr->rx_next_cons)) {
+        bnxt_sched_reset(bp, rxr);
+        return;
+    }
+
     prod_rx_buf->data = tpa_info->data;

     mapping = tpa_info->mapping;
@@ -867,6 +912,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,

     rxr->rx_prod = NEXT_RX(prod);
     cons = NEXT_RX(cons);
+    rxr->rx_next_cons = NEXT_RX(cons);
     cons_rx_buf = &rxr->rx_buf_ring[cons];

     bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
@@ -980,6 +1026,14 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
     dma_addr_t mapping;
     struct sk_buff *skb;

+    if (unlikely(bnapi->in_reset)) {
+        int rc = bnxt_discard_rx(bp, bnapi, raw_cons, tpa_end);
+
+        if (rc < 0)
+            return ERR_PTR(-EBUSY);
+        return NULL;
+    }
+
     tpa_info = &rxr->rx_tpa[agg_id];
     data = tpa_info->data;
     prefetch(data);
@@ -1146,6 +1200,12 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
     cons = rxcmp->rx_cmp_opaque;
     rx_buf = &rxr->rx_buf_ring[cons];
     data = rx_buf->data;
+    if (unlikely(cons != rxr->rx_next_cons)) {
+        int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp);
+
+        bnxt_sched_reset(bp, rxr);
+        return rc1;
+    }
     prefetch(data);

     agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & RX_CMP_AGG_BUFS) >>
@@ -1245,6 +1305,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,

 next_rx:
     rxr->rx_prod = NEXT_RX(prod);
+    rxr->rx_next_cons = NEXT_RX(cons);

 next_rx_no_prod:
     *raw_cons = tmp_raw_cons;
@@ -2486,6 +2547,7 @@ static void bnxt_clear_ring_indices(struct bnxt *bp)
             rxr->rx_prod = 0;
             rxr->rx_agg_prod = 0;
             rxr->rx_sw_agg_prod = 0;
+            rxr->rx_next_cons = 0;
         }
     }
 }
@@ -4462,6 +4524,7 @@ static void bnxt_enable_napi(struct bnxt *bp)
     int i;

     for (i = 0; i < bp->cp_nr_rings; i++) {
+        bp->bnapi[i]->in_reset = false;
         bnxt_enable_poll(bp->bnapi[i]);
         napi_enable(&bp->bnapi[i]->napi);
     }
...
@@ -584,6 +584,7 @@ struct bnxt_rx_ring_info {
     u16             rx_prod;
     u16             rx_agg_prod;
     u16             rx_sw_agg_prod;
+    u16             rx_next_cons;
     void __iomem    *rx_doorbell;
     void __iomem    *rx_agg_doorbell;

@@ -636,6 +637,7 @@ struct bnxt_napi {
 #ifdef CONFIG_NET_RX_BUSY_POLL
     atomic_t        poll_state;
 #endif
+    bool            in_reset;
 };

 #ifdef CONFIG_NET_RX_BUSY_POLL
...
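Taken together, the two bnxt_en parts defend against completions whose opaque value (expected to echo the software consumer index) comes back corrupted: rx_next_cons mirrors what the hardware should return next, a mismatch discards the packet and schedules a ring reset, and in_reset keeps discarding TPA completions until NAPI is re-enabled. A compressed sketch of the bookkeeping, using our own types and names rather than the driver's:

#include <stdbool.h>
#include <stdint.h>

/* illustrative mirror of the two fields the patches add */
struct demo_rx_ring {
    uint16_t rx_next_cons;  /* sw copy of the expected consumer index */
    bool in_reset;          /* discard completions until ring reinit */
};

/* returns true when the completion's opaque can be trusted */
static bool demo_check_opaque(struct demo_rx_ring *rxr, uint16_t opaque)
{
    if (opaque != rxr->rx_next_cons) {
        /* bad opaque: poison the expectation so every later
         * completion also fails until the scheduled reset runs */
        rxr->in_reset = true;
        rxr->rx_next_cons = 0xffff;
        return false;
    }
    /* advance the expectation; the driver's NEXT_RX() additionally
     * wraps at the ring size */
    rxr->rx_next_cons++;
    return true;
}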
@@ -68,7 +68,7 @@ config MVNETA

 config MVNETA_BM
     tristate
-    default y if MVNETA=y && MVNETA_BM_ENABLE
+    default y if MVNETA=y && MVNETA_BM_ENABLE!=n
     default MVNETA_BM_ENABLE
     select HWBM
     help
...
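The one-liner leans on Kconfig tristate rules: a bare symbol in an expression contributes its own value (so && with an =m symbol caps the result at m), while a SYM!=n comparison is boolean. With MVNETA=y and MVNETA_BM_ENABLE=m the old default therefore collapsed to MVNETA_BM=m, leaving built-in mvneta code referencing a modular mvneta_bm; the new test forces y in that case. A small C model of the two expressions (our model, treating && as min over n < m < y):

typedef enum { N, M, Y } tristate;      /* Kconfig's value lattice */

static tristate tri_and(tristate a, tristate b) /* Kconfig && is min() */
{
    return a < b ? a : b;
}

/* old: "default y if MVNETA=y && MVNETA_BM_ENABLE" -- an =m helper
 * caps the expression at m, yielding the broken y/m combination */
static tristate old_default(tristate mvneta, tristate bm_enable)
{
    return tri_and(mvneta == Y ? Y : N, bm_enable);
}

/* new: "MVNETA_BM_ENABLE!=n" is boolean, true for =y and =m alike,
 * so a built-in mvneta now pulls the BM helper in as built-in too */
static tristate new_default(tristate mvneta, tristate bm_enable)
{
    return tri_and(mvneta == Y ? Y : N, bm_enable != N ? Y : N);
}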
@@ -1417,6 +1417,7 @@ void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
     struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump;
     struct pci_dev *pdev = adapter->pdev;
     bool extended = false;
+    int ret;

     prev_version = adapter->fw_version;
     current_version = qlcnic_83xx_get_fw_version(adapter);
@@ -1427,8 +1428,11 @@ void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
         if (qlcnic_83xx_md_check_extended_dump_capability(adapter))
             extended = !qlcnic_83xx_extend_md_capab(adapter);

-        if (!qlcnic_fw_cmd_get_minidump_temp(adapter))
-            dev_info(&pdev->dev, "Supports FW dump capability\n");
+        ret = qlcnic_fw_cmd_get_minidump_temp(adapter);
+        if (ret)
+            return;
+
+        dev_info(&pdev->dev, "Supports FW dump capability\n");

         /* Once we have minidump template with extended iSCSI dump
          * capability, update the minidump capture mask to 0x1f as
...
@@ -711,6 +711,7 @@ static void xenvif_tx_err(struct xenvif_queue *queue,
         if (cons == end)
             break;
         RING_COPY_REQUEST(&queue->tx, cons++, txp);
+        extra_count = 0; /* only the first frag can have extras */
     } while (1);
     queue->tx.req_cons = cons;
 }
...
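The single added line fixes a loop-shape bug: extras accompany only the first request of a packet, so once the error path has accounted for them, the count must be zeroed before responding to the remaining frags. A runnable skeleton of the loop, with our own stand-in for the response call:

#include <stdio.h>

/* illustrative skeleton of the xenvif_tx_err() response loop */
static void tx_err_skeleton(unsigned int cons, unsigned int end,
                            unsigned int extra_count)
{
    do {
        /* stand-in for make_tx_response() + pushing the ring */
        printf("slot %u: error response, %u extras skipped\n",
               cons, extra_count);
        if (cons == end)
            break;
        cons++;
        extra_count = 0;    /* the fix: later frags carry no extras */
    } while (1);
}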
@@ -975,6 +975,8 @@ fib_convert_metrics(struct fib_info *fi, const struct fib_config *cfg)
             val = 65535 - 40;
         if (type == RTAX_MTU && val > 65535 - 15)
             val = 65535 - 15;
+        if (type == RTAX_HOPLIMIT && val > 255)
+            val = 255;
         if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
             return -EINVAL;
         fi->fib_metrics[type - 1] = val;
...
@@ -1750,6 +1750,8 @@ static int ip6_convert_metrics(struct mx6_config *mxc,
             } else {
                 val = nla_get_u32(nla);
             }
+            if (type == RTAX_HOPLIMIT && val > 255)
+                val = 255;
             if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
                 goto err;
...
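Both hoplimit hunks clamp instead of rejecting: a netlink-supplied hoplimit metric above 255 would otherwise be truncated when stored into the 8-bit TTL/hop-limit header field, so a value like 256 ended up sending packets with TTL 0. The shared logic, as a one-line sketch:

#include <stdint.h>

/* keep the metric within what the 8-bit header field can carry */
static inline uint32_t clamp_hoplimit(uint32_t val)
{
    return val > 255 ? 255 : val;
}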
@@ -1778,6 +1778,7 @@ void nf_conntrack_init_end(void)

 int nf_conntrack_init_net(struct net *net)
 {
+    static atomic64_t unique_id;
     int ret = -ENOMEM;
     int cpu;

@@ -1800,7 +1801,8 @@ int nf_conntrack_init_net(struct net *net)
     if (!net->ct.stat)
         goto err_pcpu_lists;

-    net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
+    net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%llu",
+                                 (u64)atomic64_inc_return(&unique_id));
     if (!net->ct.slabname)
         goto err_slabname;
...
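The slab cache name is user-visible (it appears in sysfs slab directory names even when the files themselves are unreadable), so the old "nf_conntrack_%p" format leaked a kernel pointer for every network namespace. A userspace sketch of the replacement scheme, an opaque increasing id (the kernel uses atomic64_inc_return; the names below are ours):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t unique_id;  /* one id per conntrack-enabled netns */

static void make_slab_name(char *buf, size_t len)
{
    snprintf(buf, len, "nf_conntrack_%llu",
             (unsigned long long)(atomic_fetch_add(&unique_id, 1) + 1));
}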