Commit a6e3d86e authored by David S. Miller

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2022-11-23 (ice)

This series contains updates to ice driver only.

Karol adjusts the PTP hardware semaphore check to wait longer overall while polling more often.

Brett removes use of driver-defined link speeds in favor of the values
from ethtool.h, using static tables for indexing.

Ben adds stats tracking so that reported statistics keep accumulating
across hardware resets that previously cleared them.

Marcin fixes issues setting RXDID when queues are asymmetric.

Anatolii re-introduces use of the ICE_RLAN_BASE_S define in place of a magic number.
---
v3:
 - Dropped previous patch 2
v2:
Patch 5
 - Convert some allocations to non-managed
 - Remove combined error checking; add error checks for each call
 - Remove excess NULL checks
 - Remove unnecessary NULL sets and newlines
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents bed6e865 60aeca6d
......@@ -320,6 +320,11 @@ enum ice_vsi_state {
ICE_VSI_STATE_NBITS /* must be last */
};
struct ice_vsi_stats {
struct ice_ring_stats **tx_ring_stats; /* Tx ring stats array */
struct ice_ring_stats **rx_ring_stats; /* Rx ring stats array */
};
/* struct that defines a VSI, associated with a dev */
struct ice_vsi {
struct net_device *netdev;
......@@ -373,6 +378,7 @@ struct ice_vsi {
/* VSI stats */
struct rtnl_link_stats64 net_stats;
struct rtnl_link_stats64 net_stats_prev;
struct ice_eth_stats eth_stats;
struct ice_eth_stats eth_stats_prev;
......@@ -540,6 +546,7 @@ struct ice_pf {
u16 ctrl_vsi_idx; /* control VSI index in pf->vsi array */
struct ice_vsi **vsi; /* VSIs created by the driver */
struct ice_vsi_stats **vsi_stats;
struct ice_sw *first_sw; /* first switch created by firmware */
u16 eswitch_mode; /* current mode of eswitch */
struct ice_vfs vfs;
......
......@@ -389,7 +389,7 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
* Indicates the starting address of the descriptor queue defined in
* 128 Byte units.
*/
rlan_ctx.base = ring->dma >> 7;
rlan_ctx.base = ring->dma >> ICE_RLAN_BASE_S;
rlan_ctx.qlen = ring->count;
......
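The define documents that the Rx descriptor queue base is programmed in 128-byte units. A minimal userspace sketch of that arithmetic, assuming ICE_RLAN_BASE_S is 7 (the magic number it replaces, since 2^7 = 128):

```c
/* Sketch only: shows the unit conversion the shift performs.
 * ICE_RLAN_BASE_S is assumed to be 7, matching the replaced magic number.
 */
#include <inttypes.h>
#include <stdio.h>

#define ICE_RLAN_BASE_S 7

int main(void)
{
	uint64_t dma = 0x2000;                  /* hypothetical ring DMA address */
	uint64_t base = dma >> ICE_RLAN_BASE_S; /* address in 128-byte units */

	printf("base = %" PRIu64 "\n", base);   /* 0x2000 / 128 = 64 */
	return 0;
}
```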
......@@ -2948,8 +2948,8 @@ bool ice_is_100m_speed_supported(struct ice_hw *hw)
* Note: In the structure of [phy_type_low, phy_type_high], there should
* be one bit set, as this function will convert one PHY type to its
* speed.
* If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
* If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
* If no bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned
* If more than one bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned
*/
static u16
ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
......@@ -5515,3 +5515,40 @@ bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
ICE_FW_API_REPORT_DFLT_CFG_MIN,
ICE_FW_API_REPORT_DFLT_CFG_PATCH);
}
/* each of the indexes into the following array matches the speed of a return
* value from the list of AQ returned speeds like the range:
* ICE_AQ_LINK_SPEED_10MB .. ICE_AQ_LINK_SPEED_100GB excluding
* ICE_AQ_LINK_SPEED_UNKNOWN which is BIT(15) and maps to BIT(14) in this
* array. The array is defined as 15 elements long because the link_speed
* returned by the firmware is a 16 bit value, but is indexed
* by [fls(speed) - 1]
*/
static const u32 ice_aq_to_link_speed[15] = {
SPEED_10, /* BIT(0) */
SPEED_100,
SPEED_1000,
SPEED_2500,
SPEED_5000,
SPEED_10000,
SPEED_20000,
SPEED_25000,
SPEED_40000,
SPEED_50000,
SPEED_100000, /* BIT(10) */
0,
0,
0,
0 /* BIT(14) */
};
/**
* ice_get_link_speed - get integer speed from table
* @index: array index from fls(aq speed) - 1
*
* Returns: u32 value containing integer speed
*/
u32 ice_get_link_speed(u16 index)
{
return ice_aq_to_link_speed[index];
}
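Callers turn the firmware's one-hot AQ speed into an index with fls(link_speed) - 1 before using the table. A hedged sketch of that lookup; the BIT(7) position for ICE_AQ_LINK_SPEED_25GB is an assumption taken from the array comment, and the open-coded helper stands in for the kernel's fls():

```c
#include <stdio.h>

#define ICE_AQ_LINK_SPEED_25GB (1 << 7)   /* assumed bit position */

/* condensed copy of the mapping: index == fls(AQ speed bit) - 1 */
static const unsigned int aq_to_link_speed[15] = {
	10, 100, 1000, 2500, 5000, 10000, 20000, 25000, 40000, 50000, 100000,
};

/* open-coded stand-in for the kernel's fls(): position of the highest set bit */
static int fls_demo(unsigned int x)
{
	int i = 0;

	while (x) {
		x >>= 1;
		i++;
	}
	return i;
}

int main(void)
{
	unsigned int mbps = aq_to_link_speed[fls_demo(ICE_AQ_LINK_SPEED_25GB) - 1];

	printf("%u Mbps\n", mbps);   /* 25000, i.e. SPEED_25000 */
	return 0;
}
```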
......@@ -163,6 +163,7 @@ int
ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
bool write, struct ice_sq_cd *cd);
u32 ice_get_link_speed(u16 index);
int
ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
......
......@@ -881,6 +881,9 @@ void ice_update_dcb_stats(struct ice_pf *pf)
prev_ps = &pf->stats_prev;
cur_ps = &pf->stats;
if (ice_is_reset_in_progress(pf->state))
pf->stat_prev_loaded = false;
for (i = 0; i < 8; i++) {
ice_stat_update32(hw, GLPRT_PXOFFRXC(port, i),
pf->stat_prev_loaded,
......
......@@ -1544,9 +1544,9 @@ __ice_get_ethtool_stats(struct net_device *netdev,
ice_for_each_alloc_txq(vsi, j) {
tx_ring = READ_ONCE(vsi->tx_rings[j]);
if (tx_ring) {
data[i++] = tx_ring->stats.pkts;
data[i++] = tx_ring->stats.bytes;
if (tx_ring && tx_ring->ring_stats) {
data[i++] = tx_ring->ring_stats->stats.pkts;
data[i++] = tx_ring->ring_stats->stats.bytes;
} else {
data[i++] = 0;
data[i++] = 0;
......@@ -1555,9 +1555,9 @@ __ice_get_ethtool_stats(struct net_device *netdev,
ice_for_each_alloc_rxq(vsi, j) {
rx_ring = READ_ONCE(vsi->rx_rings[j]);
if (rx_ring) {
data[i++] = rx_ring->stats.pkts;
data[i++] = rx_ring->stats.bytes;
if (rx_ring && rx_ring->ring_stats) {
data[i++] = rx_ring->ring_stats->stats.pkts;
data[i++] = rx_ring->ring_stats->stats.bytes;
} else {
data[i++] = 0;
data[i++] = 0;
......
......@@ -908,17 +908,5 @@ static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype)
return ice_ptype_lkup[ptype];
}
#define ICE_LINK_SPEED_UNKNOWN 0
#define ICE_LINK_SPEED_10MBPS 10
#define ICE_LINK_SPEED_100MBPS 100
#define ICE_LINK_SPEED_1000MBPS 1000
#define ICE_LINK_SPEED_2500MBPS 2500
#define ICE_LINK_SPEED_5000MBPS 5000
#define ICE_LINK_SPEED_10000MBPS 10000
#define ICE_LINK_SPEED_20000MBPS 20000
#define ICE_LINK_SPEED_25000MBPS 25000
#define ICE_LINK_SPEED_40000MBPS 40000
#define ICE_LINK_SPEED_50000MBPS 50000
#define ICE_LINK_SPEED_100000MBPS 100000
#endif /* _ICE_LAN_TX_RX_H_ */
......@@ -130,12 +130,17 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)
ice_for_each_txq(vsi, i) {
struct ice_tx_ring *tx_ring = vsi->tx_rings[i];
struct ice_ring_stats *ring_stats;
if (!tx_ring)
continue;
if (ice_ring_ch_enabled(tx_ring))
continue;
ring_stats = tx_ring->ring_stats;
if (!ring_stats)
continue;
if (tx_ring->desc) {
/* If packet counter has not changed the queue is
* likely stalled, so force an interrupt for this
......@@ -144,8 +149,8 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)
* prev_pkt would be negative if there was no
* pending work.
*/
packets = tx_ring->stats.pkts & INT_MAX;
if (tx_ring->tx_stats.prev_pkt == packets) {
packets = ring_stats->stats.pkts & INT_MAX;
if (ring_stats->tx_stats.prev_pkt == packets) {
/* Trigger sw interrupt to revive the queue */
ice_trigger_sw_intr(hw, tx_ring->q_vector);
continue;
......@@ -155,7 +160,7 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)
* to ice_get_tx_pending()
*/
smp_rmb();
tx_ring->tx_stats.prev_pkt =
ring_stats->tx_stats.prev_pkt =
ice_get_tx_pending(tx_ring) ? packets : -1;
}
}
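The prev_pkt bookkeeping now lives in the separately allocated ring_stats, but the detection idea is unchanged: a queue with outstanding work whose packet counter has not advanced since the previous pass gets a software interrupt. A self-contained sketch of the pattern with stubbed helpers (names are illustrative, not the driver's):

```c
#include <stdbool.h>
#include <stdio.h>

struct ring_state {
	long prev_pkt;        /* -1 means "no pending work on the last pass" */
	unsigned long pkts;   /* completed-packet counter for the queue */
};

/* hypothetical stand-ins for driver internals */
static bool ring_has_pending_work(const struct ring_state *r) { (void)r; return true; }
static void trigger_sw_interrupt(void) { puts("sw interrupt fired"); }

static void check_for_hang(struct ring_state *r)
{
	unsigned long packets = r->pkts;

	if (r->prev_pkt == (long)packets) {
		/* counter did not move since the last pass: revive the queue */
		trigger_sw_interrupt();
		return;
	}
	/* remember the count only while work is still outstanding */
	r->prev_pkt = ring_has_pending_work(r) ? (long)packets : -1;
}

int main(void)
{
	struct ring_state r = { .prev_pkt = -1, .pkts = 5 };

	check_for_hang(&r);   /* records prev_pkt = 5 */
	check_for_hang(&r);   /* counter unchanged -> interrupt */
	return 0;
}
```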
......@@ -2546,13 +2551,20 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
ice_for_each_xdp_txq(vsi, i) {
u16 xdp_q_idx = vsi->alloc_txq + i;
struct ice_ring_stats *ring_stats;
struct ice_tx_ring *xdp_ring;
xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
if (!xdp_ring)
goto free_xdp_rings;
ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
if (!ring_stats) {
ice_free_tx_ring(xdp_ring);
goto free_xdp_rings;
}
xdp_ring->ring_stats = ring_stats;
xdp_ring->q_index = xdp_q_idx;
xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
xdp_ring->vsi = vsi;
......@@ -2575,9 +2587,13 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
return 0;
free_xdp_rings:
for (; i >= 0; i--)
if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
for (; i >= 0; i--) {
if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) {
kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
vsi->xdp_rings[i]->ring_stats = NULL;
ice_free_tx_ring(vsi->xdp_rings[i]);
}
}
return -ENOMEM;
}
......@@ -2778,6 +2794,8 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi)
synchronize_rcu();
ice_free_tx_ring(vsi->xdp_rings[i]);
}
kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
vsi->xdp_rings[i]->ring_stats = NULL;
kfree_rcu(vsi->xdp_rings[i], rcu);
vsi->xdp_rings[i] = NULL;
}
......@@ -4756,11 +4774,18 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
goto err_init_pf_unroll;
}
pf->vsi_stats = devm_kcalloc(dev, pf->num_alloc_vsi,
sizeof(*pf->vsi_stats), GFP_KERNEL);
if (!pf->vsi_stats) {
err = -ENOMEM;
goto err_init_vsi_unroll;
}
err = ice_init_interrupt_scheme(pf);
if (err) {
dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
err = -EIO;
goto err_init_vsi_unroll;
goto err_init_vsi_stats_unroll;
}
/* In case of MSIX we are going to setup the misc vector right here
......@@ -4941,6 +4966,9 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
ice_free_irq_msix_misc(pf);
err_init_interrupt_unroll:
ice_clear_interrupt_scheme(pf);
err_init_vsi_stats_unroll:
devm_kfree(dev, pf->vsi_stats);
pf->vsi_stats = NULL;
err_init_vsi_unroll:
devm_kfree(dev, pf->vsi);
err_init_pf_unroll:
......@@ -5063,6 +5091,8 @@ static void ice_remove(struct pci_dev *pdev)
continue;
ice_vsi_free_q_vectors(pf->vsi[i]);
}
devm_kfree(&pdev->dev, pf->vsi_stats);
pf->vsi_stats = NULL;
ice_deinit_pf(pf);
ice_devlink_destroy_regions(pf);
ice_deinit_hw(&pf->hw);
......@@ -6380,14 +6410,16 @@ ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
u64 pkts = 0, bytes = 0;
ring = READ_ONCE(rings[i]);
if (!ring)
if (!ring || !ring->ring_stats)
continue;
ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes);
ice_fetch_u64_stats_per_ring(&ring->ring_stats->syncp,
ring->ring_stats->stats, &pkts,
&bytes);
vsi_stats->tx_packets += pkts;
vsi_stats->tx_bytes += bytes;
vsi->tx_restart += ring->tx_stats.restart_q;
vsi->tx_busy += ring->tx_stats.tx_busy;
vsi->tx_linearize += ring->tx_stats.tx_linearize;
vsi->tx_restart += ring->ring_stats->tx_stats.restart_q;
vsi->tx_busy += ring->ring_stats->tx_stats.tx_busy;
vsi->tx_linearize += ring->ring_stats->tx_stats.tx_linearize;
}
}
......@@ -6397,6 +6429,7 @@ ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
*/
static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
{
struct rtnl_link_stats64 *net_stats, *stats_prev;
struct rtnl_link_stats64 *vsi_stats;
u64 pkts, bytes;
int i;
......@@ -6421,12 +6454,16 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
/* update Rx rings counters */
ice_for_each_rxq(vsi, i) {
struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]);
struct ice_ring_stats *ring_stats;
ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes);
ring_stats = ring->ring_stats;
ice_fetch_u64_stats_per_ring(&ring_stats->syncp,
ring_stats->stats, &pkts,
&bytes);
vsi_stats->rx_packets += pkts;
vsi_stats->rx_bytes += bytes;
vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed;
vsi->rx_page_failed += ring->rx_stats.alloc_page_failed;
vsi->rx_buf_failed += ring_stats->rx_stats.alloc_buf_failed;
vsi->rx_page_failed += ring_stats->rx_stats.alloc_page_failed;
}
/* update XDP Tx rings counters */
......@@ -6436,10 +6473,28 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
rcu_read_unlock();
vsi->net_stats.tx_packets = vsi_stats->tx_packets;
vsi->net_stats.tx_bytes = vsi_stats->tx_bytes;
vsi->net_stats.rx_packets = vsi_stats->rx_packets;
vsi->net_stats.rx_bytes = vsi_stats->rx_bytes;
net_stats = &vsi->net_stats;
stats_prev = &vsi->net_stats_prev;
/* clear prev counters after reset */
if (vsi_stats->tx_packets < stats_prev->tx_packets ||
vsi_stats->rx_packets < stats_prev->rx_packets) {
stats_prev->tx_packets = 0;
stats_prev->tx_bytes = 0;
stats_prev->rx_packets = 0;
stats_prev->rx_bytes = 0;
}
/* update netdev counters */
net_stats->tx_packets += vsi_stats->tx_packets - stats_prev->tx_packets;
net_stats->tx_bytes += vsi_stats->tx_bytes - stats_prev->tx_bytes;
net_stats->rx_packets += vsi_stats->rx_packets - stats_prev->rx_packets;
net_stats->rx_bytes += vsi_stats->rx_bytes - stats_prev->rx_bytes;
stats_prev->tx_packets = vsi_stats->tx_packets;
stats_prev->tx_bytes = vsi_stats->tx_bytes;
stats_prev->rx_packets = vsi_stats->rx_packets;
stats_prev->rx_bytes = vsi_stats->rx_bytes;
kfree(vsi_stats);
}
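The new prev-counter handling is what lets the netdev counters keep growing across resets: when the freshly summed ring totals drop below the previous snapshot, the snapshot is zeroed so only the post-reset delta is added. A standalone sketch of that accumulation step (field names reduced for brevity):

```c
#include <inttypes.h>
#include <stdio.h>

struct counters { uint64_t tx_packets, rx_packets; };

static void accumulate(struct counters *net, struct counters *prev,
		       const struct counters *cur)
{
	/* ring counters went backwards => they were cleared by a reset */
	if (cur->tx_packets < prev->tx_packets ||
	    cur->rx_packets < prev->rx_packets)
		*prev = (struct counters){ 0 };

	net->tx_packets += cur->tx_packets - prev->tx_packets;
	net->rx_packets += cur->rx_packets - prev->rx_packets;
	*prev = *cur;   /* snapshot for the next update */
}

int main(void)
{
	struct counters net = { 0 }, prev = { 0 };

	accumulate(&net, &prev, &(struct counters){ 100, 200 });
	/* reset: per-ring counters restart from 10/20 */
	accumulate(&net, &prev, &(struct counters){ 10, 20 });
	printf("tx=%" PRIu64 " rx=%" PRIu64 "\n",
	       net.tx_packets, net.rx_packets);   /* tx=110 rx=220 */
	return 0;
}
```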
......@@ -6501,6 +6556,9 @@ void ice_update_pf_stats(struct ice_pf *pf)
prev_ps = &pf->stats_prev;
cur_ps = &pf->stats;
if (ice_is_reset_in_progress(pf->state))
pf->stat_prev_loaded = false;
ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
&prev_ps->eth.rx_bytes,
&cur_ps->eth.rx_bytes);
......
......@@ -2963,16 +2963,18 @@ bool ice_ptp_lock(struct ice_hw *hw)
u32 hw_lock;
int i;
#define MAX_TRIES 5
#define MAX_TRIES 15
for (i = 0; i < MAX_TRIES; i++) {
hw_lock = rd32(hw, PFTSYN_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
hw_lock = hw_lock & PFTSYN_SEM_BUSY_M;
if (!hw_lock)
break;
if (hw_lock) {
/* Somebody is holding the lock */
usleep_range(5000, 6000);
continue;
}
/* Somebody is holding the lock */
usleep_range(10000, 20000);
break;
}
return !hw_lock;
......
......@@ -156,18 +156,20 @@ ice_repr_sp_stats64(const struct net_device *dev,
u64 pkts, bytes;
tx_ring = np->vsi->tx_rings[vf_id];
ice_fetch_u64_stats_per_ring(&tx_ring->syncp, tx_ring->stats,
ice_fetch_u64_stats_per_ring(&tx_ring->ring_stats->syncp,
tx_ring->ring_stats->stats,
&pkts, &bytes);
stats->rx_packets = pkts;
stats->rx_bytes = bytes;
rx_ring = np->vsi->rx_rings[vf_id];
ice_fetch_u64_stats_per_ring(&rx_ring->syncp, rx_ring->stats,
ice_fetch_u64_stats_per_ring(&rx_ring->ring_stats->syncp,
rx_ring->ring_stats->stats,
&pkts, &bytes);
stats->tx_packets = pkts;
stats->tx_bytes = bytes;
stats->tx_dropped = rx_ring->rx_stats.alloc_page_failed +
rx_ring->rx_stats.alloc_buf_failed;
stats->tx_dropped = rx_ring->ring_stats->rx_stats.alloc_page_failed +
rx_ring->ring_stats->rx_stats.alloc_buf_failed;
return 0;
}
......
......@@ -325,7 +325,7 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
if (netif_tx_queue_stopped(txring_txq(tx_ring)) &&
!test_bit(ICE_VSI_DOWN, vsi->state)) {
netif_tx_wake_queue(txring_txq(tx_ring));
++tx_ring->tx_stats.restart_q;
++tx_ring->ring_stats->tx_stats.restart_q;
}
}
......@@ -367,7 +367,7 @@ int ice_setup_tx_ring(struct ice_tx_ring *tx_ring)
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
tx_ring->tx_stats.prev_pkt = -1;
tx_ring->ring_stats->tx_stats.prev_pkt = -1;
return 0;
err:
......@@ -667,7 +667,7 @@ ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi)
/* alloc new page for storage */
page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
if (unlikely(!page)) {
rx_ring->rx_stats.alloc_page_failed++;
rx_ring->ring_stats->rx_stats.alloc_page_failed++;
return false;
}
......@@ -680,7 +680,7 @@ ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi)
*/
if (dma_mapping_error(rx_ring->dev, dma)) {
__free_pages(page, ice_rx_pg_order(rx_ring));
rx_ring->rx_stats.alloc_page_failed++;
rx_ring->ring_stats->rx_stats.alloc_page_failed++;
return false;
}
......@@ -1091,7 +1091,7 @@ ice_is_non_eop(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc)
if (likely(ice_test_staterr(rx_desc->wb.status_error0, ICE_RXD_EOF)))
return false;
rx_ring->rx_stats.non_eop_descs++;
rx_ring->ring_stats->rx_stats.non_eop_descs++;
return true;
}
......@@ -1222,7 +1222,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
}
/* exit if we failed to retrieve a buffer */
if (!skb) {
rx_ring->rx_stats.alloc_buf_failed++;
rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
if (rx_buf)
rx_buf->pagecnt_bias++;
break;
......@@ -1275,7 +1275,9 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
rx_ring->skb = skb;
ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes);
if (rx_ring->ring_stats)
ice_update_rx_ring_stats(rx_ring, total_rx_pkts,
total_rx_bytes);
/* guarantee a trip back through this routine if there was a failure */
return failure ? budget : (int)total_rx_pkts;
......@@ -1292,15 +1294,25 @@ static void __ice_update_sample(struct ice_q_vector *q_vector,
struct ice_tx_ring *tx_ring;
ice_for_each_tx_ring(tx_ring, *rc) {
packets += tx_ring->stats.pkts;
bytes += tx_ring->stats.bytes;
struct ice_ring_stats *ring_stats;
ring_stats = tx_ring->ring_stats;
if (!ring_stats)
continue;
packets += ring_stats->stats.pkts;
bytes += ring_stats->stats.bytes;
}
} else {
struct ice_rx_ring *rx_ring;
ice_for_each_rx_ring(rx_ring, *rc) {
packets += rx_ring->stats.pkts;
bytes += rx_ring->stats.bytes;
struct ice_ring_stats *ring_stats;
ring_stats = rx_ring->ring_stats;
if (!ring_stats)
continue;
packets += ring_stats->stats.pkts;
bytes += ring_stats->stats.bytes;
}
}
......@@ -1549,7 +1561,7 @@ static int __ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
/* A reprieve! - use start_queue because it doesn't call schedule */
netif_tx_start_queue(txring_txq(tx_ring));
++tx_ring->tx_stats.restart_q;
++tx_ring->ring_stats->tx_stats.restart_q;
return 0;
}
......@@ -2293,7 +2305,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
if (__skb_linearize(skb))
goto out_drop;
count = ice_txd_use_count(skb->len);
tx_ring->tx_stats.tx_linearize++;
tx_ring->ring_stats->tx_stats.tx_linearize++;
}
/* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
......@@ -2304,7 +2316,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
*/
if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
ICE_DESCS_FOR_CTX_DESC)) {
tx_ring->tx_stats.tx_busy++;
tx_ring->ring_stats->tx_stats.tx_busy++;
return NETDEV_TX_BUSY;
}
......
......@@ -191,6 +191,16 @@ struct ice_rxq_stats {
u64 alloc_buf_failed;
};
struct ice_ring_stats {
struct rcu_head rcu; /* to avoid race on free */
struct ice_q_stats stats;
struct u64_stats_sync syncp;
union {
struct ice_txq_stats tx_stats;
struct ice_rxq_stats rx_stats;
};
};
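The rcu_head is there because the stats block is now allocated separately from its ring and must outlive any lockless readers, so teardown uses kfree_rcu(), as the XDP ring paths earlier in this diff show. A reduced sketch of that lifecycle; the demo_* names are stand-ins, not the driver's own types:

```c
#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_ring_stats {
	struct rcu_head rcu;   /* defers the free past a grace period */
	u64 pkts;
	u64 bytes;
};

struct demo_tx_ring {
	struct demo_ring_stats *ring_stats;
};

static int demo_ring_stats_attach(struct demo_tx_ring *ring)
{
	ring->ring_stats = kzalloc(sizeof(*ring->ring_stats), GFP_KERNEL);
	return ring->ring_stats ? 0 : -ENOMEM;
}

static void demo_ring_stats_detach(struct demo_tx_ring *ring)
{
	if (!ring->ring_stats)
		return;
	kfree_rcu(ring->ring_stats, rcu);   /* freed after readers are done */
	ring->ring_stats = NULL;
}
```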
enum ice_ring_state_t {
ICE_TX_XPS_INIT_DONE,
ICE_TX_NBITS,
......@@ -283,9 +293,7 @@ struct ice_rx_ring {
u16 rx_buf_len;
/* stats structs */
struct ice_rxq_stats rx_stats;
struct ice_q_stats stats;
struct u64_stats_sync syncp;
struct ice_ring_stats *ring_stats;
struct rcu_head rcu; /* to avoid race on free */
/* CL4 - 3rd cacheline starts here */
......@@ -325,10 +333,8 @@ struct ice_tx_ring {
u16 count; /* Number of descriptors */
u16 q_index; /* Queue number of ring */
/* stats structs */
struct ice_txq_stats tx_stats;
struct ice_ring_stats *ring_stats;
/* CL3 - 3rd cacheline starts here */
struct ice_q_stats stats;
struct u64_stats_sync syncp;
struct rcu_head rcu; /* to avoid race on free */
DECLARE_BITMAP(xps_state, ICE_TX_NBITS); /* XPS Config State */
struct ice_channel *ch;
......
......@@ -285,7 +285,7 @@ int ice_xmit_xdp_ring(void *data, u16 size, struct ice_tx_ring *xdp_ring)
ice_clean_xdp_irq(xdp_ring);
if (!unlikely(ICE_DESC_UNUSED(xdp_ring))) {
xdp_ring->tx_stats.tx_busy++;
xdp_ring->ring_stats->tx_stats.tx_busy++;
return ICE_XDP_CONSUMED;
}
......
......@@ -39,6 +39,24 @@ ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd);
}
static const u32 ice_legacy_aq_to_vc_speed[15] = {
VIRTCHNL_LINK_SPEED_100MB, /* BIT(0) */
VIRTCHNL_LINK_SPEED_100MB,
VIRTCHNL_LINK_SPEED_1GB,
VIRTCHNL_LINK_SPEED_1GB,
VIRTCHNL_LINK_SPEED_1GB,
VIRTCHNL_LINK_SPEED_10GB,
VIRTCHNL_LINK_SPEED_20GB,
VIRTCHNL_LINK_SPEED_25GB,
VIRTCHNL_LINK_SPEED_40GB,
VIRTCHNL_LINK_SPEED_40GB,
VIRTCHNL_LINK_SPEED_40GB,
VIRTCHNL_LINK_SPEED_UNKNOWN,
VIRTCHNL_LINK_SPEED_UNKNOWN,
VIRTCHNL_LINK_SPEED_UNKNOWN,
VIRTCHNL_LINK_SPEED_UNKNOWN /* BIT(14) */
};
/**
* ice_conv_link_speed_to_virtchnl
* @adv_link_support: determines the format of the returned link speed
......@@ -55,79 +73,17 @@ u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
{
u32 speed;
if (adv_link_support)
switch (link_speed) {
case ICE_AQ_LINK_SPEED_10MB:
speed = ICE_LINK_SPEED_10MBPS;
break;
case ICE_AQ_LINK_SPEED_100MB:
speed = ICE_LINK_SPEED_100MBPS;
break;
case ICE_AQ_LINK_SPEED_1000MB:
speed = ICE_LINK_SPEED_1000MBPS;
break;
case ICE_AQ_LINK_SPEED_2500MB:
speed = ICE_LINK_SPEED_2500MBPS;
break;
case ICE_AQ_LINK_SPEED_5GB:
speed = ICE_LINK_SPEED_5000MBPS;
break;
case ICE_AQ_LINK_SPEED_10GB:
speed = ICE_LINK_SPEED_10000MBPS;
break;
case ICE_AQ_LINK_SPEED_20GB:
speed = ICE_LINK_SPEED_20000MBPS;
break;
case ICE_AQ_LINK_SPEED_25GB:
speed = ICE_LINK_SPEED_25000MBPS;
break;
case ICE_AQ_LINK_SPEED_40GB:
speed = ICE_LINK_SPEED_40000MBPS;
break;
case ICE_AQ_LINK_SPEED_50GB:
speed = ICE_LINK_SPEED_50000MBPS;
break;
case ICE_AQ_LINK_SPEED_100GB:
speed = ICE_LINK_SPEED_100000MBPS;
break;
default:
speed = ICE_LINK_SPEED_UNKNOWN;
break;
}
else
if (adv_link_support) {
/* convert a BIT() value into an array index */
speed = ice_get_link_speed(fls(link_speed) - 1);
} else {
/* Virtchnl speeds are not defined for every speed supported in
* the hardware. To maintain compatibility with older AVF
* drivers, while reporting the speed the new speed values are
* resolved to the closest known virtchnl speeds
*/
switch (link_speed) {
case ICE_AQ_LINK_SPEED_10MB:
case ICE_AQ_LINK_SPEED_100MB:
speed = (u32)VIRTCHNL_LINK_SPEED_100MB;
break;
case ICE_AQ_LINK_SPEED_1000MB:
case ICE_AQ_LINK_SPEED_2500MB:
case ICE_AQ_LINK_SPEED_5GB:
speed = (u32)VIRTCHNL_LINK_SPEED_1GB;
break;
case ICE_AQ_LINK_SPEED_10GB:
speed = (u32)VIRTCHNL_LINK_SPEED_10GB;
break;
case ICE_AQ_LINK_SPEED_20GB:
speed = (u32)VIRTCHNL_LINK_SPEED_20GB;
break;
case ICE_AQ_LINK_SPEED_25GB:
speed = (u32)VIRTCHNL_LINK_SPEED_25GB;
break;
case ICE_AQ_LINK_SPEED_40GB:
case ICE_AQ_LINK_SPEED_50GB:
case ICE_AQ_LINK_SPEED_100GB:
speed = (u32)VIRTCHNL_LINK_SPEED_40GB;
break;
default:
speed = (u32)VIRTCHNL_LINK_SPEED_UNKNOWN;
break;
}
speed = ice_legacy_aq_to_vc_speed[fls(link_speed) - 1];
}
return speed;
}
......
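With both conversion paths table-driven, several adjacent AQ speed bits can deliberately collapse onto the same legacy virtchnl speed purely through the table contents; the lookup itself is always table[fls(link_speed) - 1]. A small illustrative sketch (enum values and the BIT(9) position are assumptions):

```c
#include <stdio.h>

/* assumed virtchnl codes, for illustration only */
enum { VC_SPEED_UNKNOWN = 0, VC_SPEED_1GB = 4, VC_SPEED_40GB = 0x40 };

/* indexes 2..4 (1G, 2.5G, 5G) all resolve to the nearest legacy value, 1G;
 * indexes 8..10 (40G, 50G, 100G) all resolve to 40G */
static const int legacy_aq_to_vc_speed[15] = {
	[2] = VC_SPEED_1GB,  [3] = VC_SPEED_1GB,  [4] = VC_SPEED_1GB,
	[8] = VC_SPEED_40GB, [9] = VC_SPEED_40GB, [10] = VC_SPEED_40GB,
};

int main(void)
{
	/* ICE_AQ_LINK_SPEED_50GB is assumed to be BIT(9), so fls() - 1 == 9 */
	printf("0x%x\n", legacy_aq_to_vc_speed[9]);   /* 0x40 (40G) */
	return 0;
}
```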
......@@ -1621,9 +1621,6 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
}
for (i = 0; i < qci->num_queue_pairs; i++) {
struct ice_hw *hw;
u32 rxdid;
u16 pf_q;
qpi = &qci->qpair[i];
if (qpi->txq.vsi_id != qci->vsi_id ||
qpi->rxq.vsi_id != qci->vsi_id ||
......@@ -1664,6 +1661,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
/* copy Rx queue info from VF into VSI */
if (qpi->rxq.ring_len > 0) {
u16 max_frame_size = ice_vc_get_max_frame_size(vf);
u32 rxdid;
vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
vsi->rx_rings[i]->count = qpi->rxq.ring_len;
......@@ -1691,26 +1689,25 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
vf->vf_id, i);
goto error_param;
}
}
/* VF Rx queue RXDID configuration */
pf_q = vsi->rxq_map[qpi->rxq.queue_id];
rxdid = qpi->rxq.rxdid;
hw = &vsi->back->hw;
/* If Rx flex desc is supported, select RXDID for Rx
* queues. Otherwise, use legacy 32byte descriptor
* format. Legacy 16byte descriptor is not supported.
* If this RXDID is selected, return error.
*/
if (vf->driver_caps &
VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
rxdid = qpi->rxq.rxdid;
if (!(BIT(rxdid) & pf->supported_rxdids))
goto error_param;
} else {
rxdid = ICE_RXDID_LEGACY_1;
}
/* If Rx flex desc is supported, select RXDID for Rx queues.
* Otherwise, use legacy 32byte descriptor format.
* Legacy 16byte descriptor is not supported. If this RXDID
* is selected, return error.
*/
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
if (!(BIT(rxdid) & pf->supported_rxdids))
goto error_param;
} else {
rxdid = ICE_RXDID_LEGACY_1;
ice_write_qrxflxp_cntxt(&vsi->back->hw,
vsi->rxq_map[q_idx],
rxdid, 0x03, false);
}
ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x03, false);
}
/* send the response to the VF */
......
......@@ -24,13 +24,24 @@ static struct xdp_buff **ice_xdp_buf(struct ice_rx_ring *rx_ring, u32 idx)
*/
static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
{
memset(&vsi->rx_rings[q_idx]->rx_stats, 0,
sizeof(vsi->rx_rings[q_idx]->rx_stats));
memset(&vsi->tx_rings[q_idx]->stats, 0,
sizeof(vsi->tx_rings[q_idx]->stats));
struct ice_vsi_stats *vsi_stat;
struct ice_pf *pf;
pf = vsi->back;
if (!pf->vsi_stats)
return;
vsi_stat = pf->vsi_stats[vsi->idx];
if (!vsi_stat)
return;
memset(&vsi_stat->rx_ring_stats[q_idx]->rx_stats, 0,
sizeof(vsi_stat->rx_ring_stats[q_idx]->rx_stats));
memset(&vsi_stat->tx_ring_stats[q_idx]->stats, 0,
sizeof(vsi_stat->tx_ring_stats[q_idx]->stats));
if (ice_is_xdp_ena_vsi(vsi))
memset(&vsi->xdp_rings[q_idx]->stats, 0,
sizeof(vsi->xdp_rings[q_idx]->stats));
memset(&vsi->xdp_rings[q_idx]->ring_stats->stats, 0,
sizeof(vsi->xdp_rings[q_idx]->ring_stats->stats));
}
/**
......@@ -722,7 +733,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
/* XDP_PASS path */
skb = ice_construct_skb_zc(rx_ring, xdp);
if (!skb) {
rx_ring->rx_stats.alloc_buf_failed++;
rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
break;
}
......