Commit 70b1aca3 authored by Jakub Kicinski

Merge branch 'intel-wired-lan-driver-updates-2023-10-19-idpf'

Jacob Keller says:

====================
Intel Wired LAN Driver Updates 2023-10-19 (idpf)

This series contains two fixes for the recently merged idpf driver.

Michal adds missing logic for programming the scheduling mode of completion
queues.

Pavan fixes a call trace caused by the mailbox work item not being
canceled if an error occurs during initialization (see the note after
the diff).
====================
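
In short, Michal's change makes each TX queue group's completion queue
carry the same flow-scheduling decision as its TX queues, and then
reports that mode to the control plane when the queues are configured.
A condensed sketch of the control flow, using only names from the diff
below (not a standalone build, just the shape of the fix):

	/* at queue-group allocation: mirror the TX scheduling mode
	 * onto the group's completion queue
	 */
	flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS,
				       VIRTCHNL2_CAP_SPLITQ_QSCHED);
	if (flow_sch_en)
		__set_bit(__IDPF_Q_FLOW_SCH_EN, tx_qgrp->complq->flags);

	/* at queue configuration: translate the flag into the
	 * virtchnl scheduling mode programmed into the device
	 */
	if (test_bit(__IDPF_Q_FLOW_SCH_EN, tx_qgrp->complq->flags))
		sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
	else
		sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
	qi[k].sched_mode = cpu_to_le16(sched_mode);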

Link: https://lore.kernel.org/r/20231023202655.173369-1-jacob.e.keller@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 81a41698 46d913d4
...
@@ -1160,6 +1160,7 @@ static void idpf_rxq_set_descids(struct idpf_vport *vport, struct idpf_queue *q)
  */
 static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
 {
+	bool flow_sch_en;
 	int err, i;
 
 	vport->txq_grps = kcalloc(vport->num_txq_grp,
@@ -1167,6 +1168,9 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
 	if (!vport->txq_grps)
 		return -ENOMEM;
 
+	flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS,
+				       VIRTCHNL2_CAP_SPLITQ_QSCHED);
+
 	for (i = 0; i < vport->num_txq_grp; i++) {
 		struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
 		struct idpf_adapter *adapter = vport->adapter;
@@ -1195,8 +1199,7 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
 			q->txq_grp = tx_qgrp;
 			hash_init(q->sched_buf_hash);
 
-			if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS,
-					     VIRTCHNL2_CAP_SPLITQ_QSCHED))
+			if (flow_sch_en)
 				set_bit(__IDPF_Q_FLOW_SCH_EN, q->flags);
 		}
 
@@ -1215,6 +1218,9 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
 		tx_qgrp->complq->desc_count = vport->complq_desc_count;
 		tx_qgrp->complq->vport = vport;
 		tx_qgrp->complq->txq_grp = tx_qgrp;
+
+		if (flow_sch_en)
+			__set_bit(__IDPF_Q_FLOW_SCH_EN, tx_qgrp->complq->flags);
 	}
 
 	return 0;
...
@@ -1473,7 +1473,7 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
 	/* Populate the queue info buffer with all queue context info */
 	for (i = 0; i < vport->num_txq_grp; i++) {
 		struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
-		int j;
+		int j, sched_mode;
 
 		for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
 			qi[k].queue_id =
@@ -1514,6 +1514,12 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
 		qi[k].ring_len = cpu_to_le16(tx_qgrp->complq->desc_count);
 		qi[k].dma_ring_addr = cpu_to_le64(tx_qgrp->complq->dma);
 
+		if (test_bit(__IDPF_Q_FLOW_SCH_EN, tx_qgrp->complq->flags))
+			sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
+		else
+			sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
+		qi[k].sched_mode = cpu_to_le16(sched_mode);
+
 		k++;
 	}
 
@@ -3140,6 +3146,7 @@ int idpf_vc_core_init(struct idpf_adapter *adapter)
 err_intr_req:
 	cancel_delayed_work_sync(&adapter->serv_task);
+	cancel_delayed_work_sync(&adapter->mbx_task);
 	idpf_vport_params_buf_rel(adapter);
 err_netdev_alloc:
 	kfree(adapter->vports);
...
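
As the last hunk shows, Pavan's fix is a single added line in the
unwind path of idpf_vc_core_init(): the mailbox work item is now
canceled, like the service task, before the virtchnl parameter buffers
are released, so the deferred work cannot fire after a failed
initialization and leave the reported call trace. The resulting unwind
order (names from the hunk above):

	err_intr_req:
		cancel_delayed_work_sync(&adapter->serv_task);
		cancel_delayed_work_sync(&adapter->mbx_task);	/* the fix: stop mailbox work first */
		idpf_vport_params_buf_rel(adapter);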