Commit b6df0078 authored by Jakub Kicinski

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net


Trivial conflict in net/netfilter/nf_tables_api.c.

Duplicate fix in tools/testing/selftests/net/devlink_port_split.py
- take the net-next version.

skmsg and L4 bpf - keep the bpf code but remove the flags
and err params.
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 3f8ad50a a118ff66
@@ -12675,6 +12675,7 @@ W:	http://www.netfilter.org/
 W:	http://www.iptables.org/
 W:	http://www.nftables.org/
 Q:	http://patchwork.ozlabs.org/project/netfilter-devel/list/
+C:	irc://irc.libera.chat/netfilter
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf.git
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next.git
 F:	include/linux/netfilter*
@@ -527,6 +527,15 @@ static int ns_init_card(int i, struct pci_dev *pcidev)
 	/* Set the VPI/VCI MSb mask to zero so we can receive OAM cells */
 	writel(0x00000000, card->membase + VPM);
 
+	card->intcnt = 0;
+	if (request_irq
+	    (pcidev->irq, &ns_irq_handler, IRQF_SHARED, "nicstar", card) != 0) {
+		pr_err("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq);
+		error = 9;
+		ns_init_card_error(card, error);
+		return error;
+	}
+
 	/* Initialize TSQ */
 	card->tsq.org = dma_alloc_coherent(&card->pcidev->dev,
 					   NS_TSQSIZE + NS_TSQ_ALIGNMENT,
@@ -753,15 +762,6 @@ static int ns_init_card(int i, struct pci_dev *pcidev)
 	card->efbie = 1;
 
-	card->intcnt = 0;
-	if (request_irq
-	    (pcidev->irq, &ns_irq_handler, IRQF_SHARED, "nicstar", card) != 0) {
-		printk("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq);
-		error = 9;
-		ns_init_card_error(card, error);
-		return error;
-	}
-
 	/* Register device */
 	card->atmdev = atm_dev_register("nicstar", &card->pcidev->dev, &atm_ops,
 					-1, NULL);
@@ -839,10 +839,12 @@ static void ns_init_card_error(ns_dev *card, int error)
 		dev_kfree_skb_any(hb);
 	}
 	if (error >= 12) {
-		kfree(card->rsq.org);
+		dma_free_coherent(&card->pcidev->dev, NS_RSQSIZE + NS_RSQ_ALIGNMENT,
+				  card->rsq.org, card->rsq.dma);
 	}
 	if (error >= 11) {
-		kfree(card->tsq.org);
+		dma_free_coherent(&card->pcidev->dev, NS_TSQSIZE + NS_TSQ_ALIGNMENT,
+				  card->tsq.org, card->tsq.dma);
 	}
 	if (error >= 10) {
 		free_irq(card->pcidev->irq, card);
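Note on the nicstar hunks above: request_irq() moves ahead of the TSQ/RSQ allocations so the numeric error codes passed to ns_init_card_error() again match the order in which resources are acquired (IRQ before TSQ before RSQ), and the error path now releases the queues with the allocator's real counterpart. A minimal sketch of the pairing rule being enforced, pieced together from the surrounding context rather than a complete driver excerpt:

	/* dma_alloc_coherent() returns a CPU pointer and fills in a bus
	 * address; only dma_free_coherent() with the same device, size and
	 * bus address releases it. kfree() on this pointer corrupts the
	 * slab allocator, which is what the hunk above fixes. */
	card->tsq.org = dma_alloc_coherent(&card->pcidev->dev,
					   NS_TSQSIZE + NS_TSQ_ALIGNMENT,
					   &card->tsq.dma, GFP_KERNEL);
	...
	dma_free_coherent(&card->pcidev->dev, NS_TSQSIZE + NS_TSQ_ALIGNMENT,
			  card->tsq.org, card->tsq.dma);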
@@ -1600,6 +1600,14 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
 	int link_reporting;
 	int res = 0, i;
 
+	if (slave_dev->flags & IFF_MASTER &&
+	    !netif_is_bond_master(slave_dev)) {
+		NL_SET_ERR_MSG(extack, "Device with IFF_MASTER cannot be enslaved");
+		netdev_err(bond_dev,
+			   "Error: Device with IFF_MASTER cannot be enslaved\n");
+		return -EPERM;
+	}
+
 	if (!bond->params.use_carrier &&
 	    slave_dev->ethtool_ops->get_link == NULL &&
 	    slave_ops->ndo_do_ioctl == NULL) {
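The new bond_enslave() check refuses any device that advertises IFF_MASTER without actually being a bonding master (stacking a bond on top of a bond remains allowed). For reference, the helper it relies on is essentially this test from include/linux/netdevice.h:

	static inline bool netif_is_bond_master(const struct net_device *dev)
	{
		return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
	}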
@@ -351,8 +351,8 @@ static int pucan_handle_status(struct peak_canfd_priv *priv,
 		return err;
 	}
 
-	/* start network queue (echo_skb array is empty) */
-	netif_start_queue(ndev);
+	/* wake network queue up (echo_skb array is empty) */
+	netif_wake_queue(ndev);
 
 	return 0;
 }
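The one-word change matters because the TX queue may have been stopped while echo skbs were in flight: netif_start_queue() only clears the driver's XOFF bit, whereas netif_wake_queue() clears it and reschedules the qdisc so frames queued up in the meantime are actually transmitted. Simplified from include/linux/netdevice.h:

	static inline void netif_start_queue(struct net_device *dev)
	{
		/* clears __QUEUE_STATE_DRV_XOFF; nothing is rescheduled */
		netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
	}

	static inline void netif_wake_queue(struct net_device *dev)
	{
		/* clears the bit and kicks the qdisc to resume TX */
		netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
	}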
@@ -1053,7 +1053,6 @@ static void ems_usb_disconnect(struct usb_interface *intf)
 	if (dev) {
 		unregister_netdev(dev->netdev);
-		free_candev(dev->netdev);
 
 		unlink_all_urbs(dev);
@@ -1061,6 +1060,8 @@ static void ems_usb_disconnect(struct usb_interface *intf)
 		kfree(dev->intr_in_buffer);
 		kfree(dev->tx_msg_buffer);
+
+		free_candev(dev->netdev);
 	}
 }
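A use-after-free fixed purely by reordering: dev is the private area returned by netdev_priv(), which lives inside the same allocation as dev->netdev, so free_candev() has to be the last statement that touches either. A sketch of the lifetime relationship (buffer names from the hunk; the allocation call is assumed from the usual candev pattern):

	struct net_device *netdev = alloc_candev(sizeof(struct ems_usb),
						 MAX_TX_URBS);
	struct ems_usb *dev = netdev_priv(netdev); /* inside netdev's allocation */

	/* ...so teardown must end with: */
	kfree(dev->intr_in_buffer);	/* still safe, netdev not freed yet */
	kfree(dev->tx_msg_buffer);
	free_candev(netdev);		/* frees netdev AND dev; nothing after */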
@@ -1618,9 +1618,6 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
 	struct mv88e6xxx_vtu_entry vlan;
 	int i, err;
 
-	if (!vid)
-		return -EOPNOTSUPP;
-
 	/* DSA and CPU ports have to be members of multiple vlans */
 	if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
 		return 0;
@@ -2109,6 +2106,9 @@ static int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
 	u8 member;
 	int err;
 
+	if (!vlan->vid)
+		return 0;
+
 	err = mv88e6xxx_port_vlan_prepare(ds, port, vlan);
 	if (err)
 		return err;
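Net effect of the two mv88e6xxx hunks: VID 0, which the 802.1Q core installs on every port as the "no VID" entry, is now accepted as a no-op in .port_vlan_add instead of failing the whole VLAN operation with -EOPNOTSUPP from the membership check; there is nothing to program into the VTU for it.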
@@ -1818,6 +1818,12 @@ static int sja1105_reload_cbs(struct sja1105_private *priv)
 {
 	int rc = 0, i;
 
+	/* The credit based shapers are only allocated if
+	 * CONFIG_NET_SCH_CBS is enabled.
+	 */
+	if (!priv->cbs)
+		return 0;
+
 	for (i = 0; i < priv->info->num_cbs_shapers; i++) {
 		struct sja1105_cbs_entry *cbs = &priv->cbs[i];
@@ -1539,10 +1539,11 @@ static int greth_of_remove(struct platform_device *of_dev)
 	mdiobus_unregister(greth->mdio);
 
 	unregister_netdev(ndev);
-	free_netdev(ndev);
 
 	of_iounmap(&of_dev->resource[0], greth->regs, resource_size(&of_dev->resource[0]));
 
+	free_netdev(ndev);
+
 	return 0;
 }
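Same netdev_priv() lifetime rule as the ems_usb fix above: greth points into ndev's allocation, so the old code's of_iounmap(..., greth->regs, ...) dereferenced freed memory; moving free_netdev() after the last use of greth closes the use-after-free.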
@@ -91,7 +91,7 @@ struct aq_macsec_txsc {
 	u32 hw_sc_idx;
 	unsigned long tx_sa_idx_busy;
 	const struct macsec_secy *sw_secy;
-	u8 tx_sa_key[MACSEC_NUM_AN][MACSEC_KEYID_LEN];
+	u8 tx_sa_key[MACSEC_NUM_AN][MACSEC_MAX_KEY_LEN];
 	struct aq_macsec_tx_sc_stats stats;
 	struct aq_macsec_tx_sa_stats tx_sa_stats[MACSEC_NUM_AN];
 };
@@ -101,7 +101,7 @@ struct aq_macsec_rxsc {
 	unsigned long rx_sa_idx_busy;
 	const struct macsec_secy *sw_secy;
 	const struct macsec_rx_sc *sw_rxsc;
-	u8 rx_sa_key[MACSEC_NUM_AN][MACSEC_KEYID_LEN];
+	u8 rx_sa_key[MACSEC_NUM_AN][MACSEC_MAX_KEY_LEN];
 	struct aq_macsec_rx_sa_stats rx_sa_stats[MACSEC_NUM_AN];
 };
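The two macsec constants are easy to confuse but differ by nearly an order of magnitude, so sizing the per-association key storage by the key-id length truncated longer keys. For reference, the values from include/uapi/linux/if_macsec.h (as of this merge):

	#define MACSEC_KEYID_LEN	16	/* the key *identifier* */
	#define MACSEC_MAX_KEY_LEN	128	/* bound on the key material itself */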
@@ -174,9 +174,6 @@ static int bcm4908_dma_alloc_buf_descs(struct bcm4908_enet *enet,
 	if (!ring->slots)
 		goto err_free_buf_descs;
 
-	ring->read_idx = 0;
-	ring->write_idx = 0;
-
 	return 0;
 
 err_free_buf_descs:
@@ -304,6 +301,9 @@ static void bcm4908_enet_dma_ring_init(struct bcm4908_enet *enet,
 	enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_BASE_DESC_PTR,
 		   (uint32_t)ring->dma_addr);
+
+	ring->read_idx = 0;
+	ring->write_idx = 0;
 }
 
 static void bcm4908_enet_dma_uninit(struct bcm4908_enet *enet)
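Moving the cursor reset out of the one-time buffer allocation and into bcm4908_enet_dma_ring_init() means read_idx/write_idx return to zero every time the hardware ring is reprogrammed (for example across an ifdown/ifup cycle), keeping the software indexes in sync with the base descriptor pointer just written to state RAM, not only on first allocation.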
@@ -550,7 +550,7 @@ int be_process_mcc(struct be_adapter *adapter)
 	int num = 0, status = 0;
 	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
 
-	spin_lock_bh(&adapter->mcc_cq_lock);
+	spin_lock(&adapter->mcc_cq_lock);
 
 	while ((compl = be_mcc_compl_get(adapter))) {
 		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
@@ -566,7 +566,7 @@ int be_process_mcc(struct be_adapter *adapter)
 	if (num)
 		be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);
 
-	spin_unlock_bh(&adapter->mcc_cq_lock);
+	spin_unlock(&adapter->mcc_cq_lock);
 	return status;
 }
@@ -581,7 +581,9 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
 		if (be_check_error(adapter, BE_ERROR_ANY))
 			return -EIO;
 
+		local_bh_disable();
 		status = be_process_mcc(adapter);
+		local_bh_enable();
 
 		if (atomic_read(&mcc_obj->q.used) == 0)
 			break;
@@ -5501,7 +5501,9 @@ static void be_worker(struct work_struct *work)
 	 * mcc completions
 	 */
 	if (!netif_running(adapter->netdev)) {
+		local_bh_disable();
 		be_process_mcc(adapter);
+		local_bh_enable();
 		goto reschedule;
 	}
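The be2net hunks move bottom-half protection out of be_process_mcc() and up to its process-context callers; the function itself now takes the plain spinlock, which also suits call sites that already run with BHs disabled (such as the NAPI poll loop). A sketch of the pattern with hypothetical names, not the driver's exact code:

	/* the worker takes the plain lock; callers guarantee BH-safe context */
	static void process_completions(struct adapter *ad)
	{
		spin_lock(&ad->cq_lock);
		/* ... drain the completion queue ... */
		spin_unlock(&ad->cq_lock);
	}

	/* a process-context caller creates that context explicitly */
	local_bh_disable();
	process_completions(ad);
	local_bh_enable();

	/* softirq callers (e.g. NAPI poll) may call it directly, since
	 * bottom halves are already disabled there */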
@@ -607,7 +607,7 @@ static s32 nps_enet_probe(struct platform_device *pdev)
 	/* Get IRQ number */
 	priv->irq = platform_get_irq(pdev, 0);
-	if (!priv->irq) {
+	if (priv->irq < 0) {
 		dev_err(dev, "failed to retrieve <irq Rx-Tx> value from device tree\n");
 		err = -ENODEV;
 		goto out_netdev;
@@ -630,8 +630,7 @@ static s32 nps_enet_probe(struct platform_device *pdev)
 out_netif_api:
 	netif_napi_del(&priv->napi);
 out_netdev:
-	if (err)
-		free_netdev(ndev);
+	free_netdev(ndev);
 
 	return err;
 }
@@ -642,8 +641,8 @@ static s32 nps_enet_remove(struct platform_device *pdev)
 	struct nps_enet_priv *priv = netdev_priv(ndev);
 
 	unregister_netdev(ndev);
-	free_netdev(ndev);
 	netif_napi_del(&priv->napi);
+	free_netdev(ndev);
 
 	return 0;
 }
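Two independent nps_enet fixes: platform_get_irq() signals failure with a negative errno (zero is not the failure value), and the teardown paths freed ndev either conditionally or too early. The corrected probe check, roughly (the driver keeps its own -ENODEV rather than propagating the errno):

	priv->irq = platform_get_irq(pdev, 0);
	if (priv->irq < 0) {		/* failure is a negative errno */
		err = -ENODEV;
		goto out_netdev;
	}

As in the greth and ems_usb fixes above, priv comes from netdev_priv(ndev), so netif_napi_del(&priv->napi) must run before free_netdev(ndev).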
@@ -1506,8 +1506,8 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	gve_write_version(&reg_bar->driver_version);
 
 	/* Get max queues to alloc etherdev */
-	max_rx_queues = ioread32be(&reg_bar->max_tx_queues);
-	max_tx_queues = ioread32be(&reg_bar->max_rx_queues);
+	max_tx_queues = ioread32be(&reg_bar->max_tx_queues);
+	max_rx_queues = ioread32be(&reg_bar->max_rx_queues);
 	/* Alloc and setup the netdev and priv */
 	dev = alloc_etherdev_mqs(sizeof(*priv), max_tx_queues, max_rx_queues);
 	if (!dev) {
@@ -106,6 +106,8 @@ static void release_crq_queue(struct ibmvnic_adapter *);
 static int __ibmvnic_set_mac(struct net_device *, u8 *);
 static int init_crq_queue(struct ibmvnic_adapter *adapter);
 static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
+static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
+					 struct ibmvnic_sub_crq_queue *tx_scrq);
 
 struct ibmvnic_stat {
 	char name[ETH_GSTRING_LEN];
@@ -232,12 +234,11 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
 	mutex_lock(&adapter->fw_lock);
 	adapter->fw_done_rc = 0;
 	reinit_completion(&adapter->fw_done);
-	rc = send_request_map(adapter, ltb->addr,
-			      ltb->size, ltb->map_id);
+
+	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
 	if (rc) {
-		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
-		mutex_unlock(&adapter->fw_lock);
-		return rc;
+		dev_err(dev, "send_request_map failed, rc = %d\n", rc);
+		goto out;
 	}
 
 	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
@@ -245,20 +246,23 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
 		dev_err(dev,
 			"Long term map request aborted or timed out,rc = %d\n",
 			rc);
-		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
-		mutex_unlock(&adapter->fw_lock);
-		return rc;
+		goto out;
 	}
 
 	if (adapter->fw_done_rc) {
 		dev_err(dev, "Couldn't map long term buffer,rc = %d\n",
 			adapter->fw_done_rc);
-		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
-		mutex_unlock(&adapter->fw_lock);
-		return -1;
+		rc = -1;
+		goto out;
 	}
-	mutex_unlock(&adapter->fw_lock);
-	return 0;
+	rc = 0;
+out:
+	if (rc) {
+		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
+		ltb->buff = NULL;
+	}
+	mutex_unlock(&adapter->fw_lock);
+	return rc;
 }
 
 static void free_long_term_buff(struct ibmvnic_adapter *adapter,
@@ -278,14 +282,44 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
 	    adapter->reset_reason != VNIC_RESET_TIMEOUT)
 		send_request_unmap(adapter, ltb->map_id);
+
 	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
+
+	ltb->buff = NULL;
+	ltb->map_id = 0;
 }
 
-static int reset_long_term_buff(struct ibmvnic_long_term_buff *ltb)
+static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
+				struct ibmvnic_long_term_buff *ltb)
 {
-	if (!ltb->buff)
-		return -EINVAL;
+	struct device *dev = &adapter->vdev->dev;
+	int rc;
 
 	memset(ltb->buff, 0, ltb->size);
+
+	mutex_lock(&adapter->fw_lock);
+	adapter->fw_done_rc = 0;
+
+	reinit_completion(&adapter->fw_done);
+	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
+	if (rc) {
+		mutex_unlock(&adapter->fw_lock);
+		return rc;
+	}
+
+	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
+	if (rc) {
+		dev_info(dev,
+			 "Reset failed, long term map request timed out or aborted\n");
+		mutex_unlock(&adapter->fw_lock);
+		return rc;
+	}
+
+	if (adapter->fw_done_rc) {
+		dev_info(dev,
+			 "Reset failed, attempting to free and reallocate buffer\n");
+		free_long_term_buff(adapter, ltb);
+		mutex_unlock(&adapter->fw_lock);
+		return alloc_long_term_buff(adapter, ltb, ltb->size);
+	}
+	mutex_unlock(&adapter->fw_lock);
 	return 0;
 }
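reset_long_term_buff() used to just zero the buffer. With this change a pool reset also re-issues the map request for the existing buffer and waits for the firmware to confirm it; if the firmware rejects the mapping, the LTB is freed and reallocated from scratch, so the driver never keeps using a mapping the other end no longer recognizes.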
@@ -321,7 +355,14 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
 	rx_scrq = adapter->rx_scrq[pool->index];
 	ind_bufp = &rx_scrq->ind_buf;
-	for (i = 0; i < count; ++i) {
+
+	/* netdev_skb_alloc() could have failed after we saved a few skbs
+	 * in the indir_buf and we would not have sent them to VIOS yet.
+	 * To account for them, start the loop at ind_bufp->index rather
+	 * than 0. If we pushed all the skbs to VIOS, ind_bufp->index will
+	 * be 0.
+	 */
+	for (i = ind_bufp->index; i < count; ++i) {
 		skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
 		if (!skb) {
 			dev_err(dev, "Couldn't replenish rx buff\n");
@@ -507,7 +548,8 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
 					     rx_pool->size *
 					     rx_pool->buff_size);
 		} else {
-			rc = reset_long_term_buff(&rx_pool->long_term_buff);
+			rc = reset_long_term_buff(adapter,
+						  &rx_pool->long_term_buff);
 		}
 
 		if (rc)
@@ -630,11 +672,12 @@ static int init_rx_pools(struct net_device *netdev)
 	return 0;
 }
 
-static int reset_one_tx_pool(struct ibmvnic_tx_pool *tx_pool)
+static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
+			     struct ibmvnic_tx_pool *tx_pool)
 {
 	int rc, i;
 
-	rc = reset_long_term_buff(&tx_pool->long_term_buff);
+	rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
 	if (rc)
 		return rc;
@@ -661,10 +704,11 @@ static int reset_tx_pools(struct ibmvnic_adapter *adapter)
 	tx_scrqs = adapter->num_active_tx_pools;
 	for (i = 0; i < tx_scrqs; i++) {
-		rc = reset_one_tx_pool(&adapter->tso_pool[i]);
+		ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]);
+		rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
 		if (rc)
 			return rc;
-		rc = reset_one_tx_pool(&adapter->tx_pool[i]);
+		rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
 		if (rc)
 			return rc;
 	}
@@ -757,8 +801,11 @@ static int init_tx_pools(struct net_device *netdev)
 	adapter->tso_pool = kcalloc(tx_subcrqs,
 				    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
-	if (!adapter->tso_pool)
+	if (!adapter->tso_pool) {
+		kfree(adapter->tx_pool);
+		adapter->tx_pool = NULL;
 		return -1;
+	}
 
 	adapter->num_active_tx_pools = tx_subcrqs;
@@ -1204,6 +1251,11 @@ static int __ibmvnic_open(struct net_device *netdev)
 
 	netif_tx_start_all_queues(netdev);
 
+	if (prev_state == VNIC_CLOSED) {
+		for (i = 0; i < adapter->req_rx_queues; i++)
+			napi_schedule(&adapter->napi[i]);
+	}
+
 	adapter->state = VNIC_OPEN;
 	return rc;
 }
@@ -1608,7 +1660,8 @@ static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
 	ind_bufp->index = 0;
 	if (atomic_sub_return(entries, &tx_scrq->used) <=
 	    (adapter->req_tx_entries_per_subcrq / 2) &&
-	    __netif_subqueue_stopped(adapter->netdev, queue_num)) {
+	    __netif_subqueue_stopped(adapter->netdev, queue_num) &&
+	    !test_bit(0, &adapter->resetting)) {
 		netif_wake_subqueue(adapter->netdev, queue_num);
 		netdev_dbg(adapter->netdev, "Started queue %d\n",
 			   queue_num);
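The added !test_bit(0, &adapter->resetting) condition keeps the cleanup path from waking a TX subqueue in the middle of a reset: the reset sequence tears the queues down and restarts them itself, so an early wake here could let xmit race with queue teardown.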
@@ -1701,7 +1754,6 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 		tx_send_failed++;
 		tx_dropped++;
 		ret = NETDEV_TX_OK;
-		ibmvnic_tx_scrq_flush(adapter, tx_scrq);
 		goto out;
 	}
@@ -3241,6 +3293,7 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
 			netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
 				   i);
+			ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]);
 			if (adapter->tx_scrq[i]->irq) {
 				free_irq(adapter->tx_scrq[i]->irq,
 					 adapter->tx_scrq[i]);
@@ -3314,7 +3367,7 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
 		/* H_EOI would fail with rc = H_FUNCTION when running
 		 * in XIVE mode which is expected, but not an error.
 		 */
-		if (rc && rc != H_FUNCTION)
+		if (rc && (rc != H_FUNCTION))
 			dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
 				val, rc);
 	}
@@ -5223,18 +5223,20 @@ static void e1000_watchdog_task(struct work_struct *work)
 			pm_runtime_resume(netdev->dev.parent);
 
 			/* Checking if MAC is in DMoff state*/
-			pcim_state = er32(STATUS);
-			while (pcim_state & E1000_STATUS_PCIM_STATE) {
-				if (tries++ == dmoff_exit_timeout) {
-					e_dbg("Error in exiting dmoff\n");
-					break;
-				}
-				usleep_range(10000, 20000);
-				pcim_state = er32(STATUS);
-
-				/* Checking if MAC exited DMoff state */
-				if (!(pcim_state & E1000_STATUS_PCIM_STATE))
-					e1000_phy_hw_reset(&adapter->hw);
-			}
+			if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
+				pcim_state = er32(STATUS);
+				while (pcim_state & E1000_STATUS_PCIM_STATE) {
+					if (tries++ == dmoff_exit_timeout) {
+						e_dbg("Error in exiting dmoff\n");
+						break;
+					}
+					usleep_range(10000, 20000);
+					pcim_state = er32(STATUS);
+
+					/* Checking if MAC exited DMoff state */
+					if (!(pcim_state & E1000_STATUS_PCIM_STATE))
+						e1000_phy_hw_reset(&adapter->hw);
+				}
+			}
 
 			/* update snapshot of PHY registers on LSC */
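The new FWSM guard means the DMoff-exit poll (and the PHY resets it can trigger) only runs when the management-engine firmware is reported valid via E1000_ICH_FWSM_FW_VALID; without valid ME firmware the E1000_STATUS_PCIM_STATE bit is not meaningful, so the loop is skipped entirely.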
@@ -1262,8 +1262,7 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
 		if (ethtool_link_ksettings_test_link_mode(&safe_ks,
 							  supported,
 							  Autoneg) &&
-		    hw->phy.link_info.phy_type !=
-		    I40E_PHY_TYPE_10GBASE_T) {
+		    hw->phy.media_type != I40E_MEDIA_TYPE_BASET) {
 			netdev_info(netdev, "Autoneg cannot be disabled on this phy\n");
 			err = -EINVAL;
 			goto done;
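Keying the check on media type rather than one PHY identifier widens the exemption to what it was meant to cover: autonegotiation may be disabled on any BASE-T link, not just 10GBASE-T, so valid requests at other BASE-T speeds no longer fail with -EINVAL.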
@@ -32,7 +32,7 @@ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
 static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
 static int i40e_add_vsi(struct i40e_vsi *vsi);
 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
-static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
+static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired);
 static int i40e_setup_misc_vector(struct i40e_pf *pf);
 static void i40e_determine_queue_usage(struct i40e_pf *pf);
 static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
@@ -8703,6 +8703,8 @@ int i40e_vsi_open(struct i40e_vsi *vsi)
 			 dev_driver_string(&pf->pdev->dev),
 			 dev_name(&pf->pdev->dev));
 		err = i40e_vsi_request_irq(vsi, int_name);
+		if (err)
+			goto err_setup_rx;
 
 	} else {
 		err = -EINVAL;
@@ -10569,7 +10571,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
 #endif /* CONFIG_I40E_DCB */
 	if (!lock_acquired)
 		rtnl_lock();
-	ret = i40e_setup_pf_switch(pf, reinit);
+	ret = i40e_setup_pf_switch(pf, reinit, true);
 	if (ret)
 		goto end_unlock;
@@ -14627,10 +14629,11 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
  * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
  * @pf: board private structure
  * @reinit: if the Main VSI needs to re-initialized.
+ * @lock_acquired: indicates whether or not the lock has been acquired
  *
  * Returns 0 on success, negative value on failure
  **/
-static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
+static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired)
 {
 	u16 flags = 0;
 	int ret;
@@ -14732,9 +14735,15 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
 	i40e_ptp_init(pf);
 
+	if (!lock_acquired)
+		rtnl_lock();
+
 	/* repopulate tunnel port filters */
 	udp_tunnel_nic_reset_ntf(pf->vsi[pf->lan_vsi]->netdev);
 
+	if (!lock_acquired)
+		rtnl_unlock();
+
 	return ret;
 }
@@ -15528,7 +15537,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
 	}
 #endif
-	err = i40e_setup_pf_switch(pf, false);
+	err = i40e_setup_pf_switch(pf, false, false);
 	if (err) {
 		dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
 		goto err_vsis;
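The lock_acquired plumbing fixes an rtnl imbalance around the tunnel-port repopulation: i40e_rebuild() already holds rtnl and now passes true, while probe passes false and lets i40e_setup_pf_switch() take and drop the lock around just the udp_tunnel_nic_reset_ntf() call, so that notification always runs under rtnl without ever double-locking.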
@@ -11,13 +11,14 @@
  * operate with the nanosecond field directly without fear of overflow.
  *
  * Much like the 82599, the update period is dependent upon the link speed:
- * At 40Gb link or no link, the period is 1.6ns.
- * At 10Gb link, the period is multiplied by 2. (3.2ns)
+ * At 40Gb, 25Gb, or no link, the period is 1.6ns.
+ * At 10Gb or 5Gb link, the period is multiplied by 2. (3.2ns)
  * At 1Gb link, the period is multiplied by 20. (32ns)
  * 1588 functionality is not supported at 100Mbps.
  */
 #define I40E_PTP_40GB_INCVAL		0x0199999999ULL
 #define I40E_PTP_10GB_INCVAL_MULT	2
+#define I40E_PTP_5GB_INCVAL_MULT	2
 #define I40E_PTP_1GB_INCVAL_MULT	20
 
 #define I40E_PRTTSYN_CTL1_TSYNTYPE_V1	BIT(I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
@@ -465,6 +466,9 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
 	case I40E_LINK_SPEED_10GB:
 		mult = I40E_PTP_10GB_INCVAL_MULT;
 		break;
+	case I40E_LINK_SPEED_5GB:
+		mult = I40E_PTP_5GB_INCVAL_MULT;
+		break;
 	case I40E_LINK_SPEED_1GB:
 		mult = I40E_PTP_1GB_INCVAL_MULT;
 		break;
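The PTP fix is simple scaling: the base increment encodes a 1.6 ns period, and slower links multiply it, so 5Gb reuses the 10Gb factor. A paraphrase of the selection logic in i40e_ptp_set_increment() (the driver keeps separate macros for the 10Gb and 5Gb cases; they share the same value):

	u64 incval = I40E_PTP_40GB_INCVAL;	/* 1.6 ns period at 40G/25G */
	u32 mult;

	switch (pf->hw.phy.link_info.link_speed) {
	case I40E_LINK_SPEED_10GB:
	case I40E_LINK_SPEED_5GB:
		mult = 2;			/* 3.2 ns period */
		break;
	case I40E_LINK_SPEED_1GB:
		mult = 20;			/* 32 ns period */
		break;
	default:
		mult = 1;
		break;
	}
	incval *= mult;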
@@ -1230,8 +1230,10 @@ static int mana_create_txq(struct mana_port_context *apc,
 
 	cq->gdma_id = cq->gdma_cq->id;
 
-	if (WARN_ON(cq->gdma_id >= gc->max_num_cqs))
-		return -EINVAL;
+	if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
+		err = -EINVAL;
+		goto out;
+	}
 
 	gc->cq_table[cq->gdma_id] = cq->gdma_cq;
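Returning straight out of the middle of mana_create_txq() skipped the function's unwind path; routing the WARN_ON failure through the common out label tears down the queues and completion queues created so far instead of leaking them.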