Commit 3c76feff authored by Ariel Elior's avatar Ariel Elior Committed by David S. Miller

bnx2x: Control number of vfs dynamically

1. Support sysfs interface for getting the maximal number of virtual functions
   of a given physical function.
2. Support sysfs interface for getting and setting the current number of
   virtual functions.
Signed-off-by: Ariel Elior <ariele@broadcom.com>
Signed-off-by: Yuval Mintz <yuvalmin@broadcom.com>
Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 3ec9f9ca
...@@ -1281,6 +1281,8 @@ struct bnx2x { ...@@ -1281,6 +1281,8 @@ struct bnx2x {
dma_addr_t pf2vf_bulletin_mapping; dma_addr_t pf2vf_bulletin_mapping;
struct pf_vf_bulletin_content old_bulletin; struct pf_vf_bulletin_content old_bulletin;
u16 requested_nr_virtfn;
#endif /* CONFIG_BNX2X_SRIOV */ #endif /* CONFIG_BNX2X_SRIOV */
struct net_device *dev; struct net_device *dev;
......
...@@ -9546,8 +9546,10 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work) ...@@ -9546,8 +9546,10 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
/* enable SR-IOV if applicable */ /* enable SR-IOV if applicable */
if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV,
&bp->sp_rtnl_state)) &bp->sp_rtnl_state)) {
bnx2x_disable_sriov(bp);
bnx2x_enable_sriov(bp); bnx2x_enable_sriov(bp);
}
} }
static void bnx2x_period_task(struct work_struct *work) static void bnx2x_period_task(struct work_struct *work)
...@@ -11423,26 +11425,6 @@ static int bnx2x_init_bp(struct bnx2x *bp) ...@@ -11423,26 +11425,6 @@ static int bnx2x_init_bp(struct bnx2x *bp)
* net_device service functions * net_device service functions
*/ */
static int bnx2x_open_epilog(struct bnx2x *bp)
{
/* Enable sriov via delayed work. This must be done via delayed work
* because it causes the probe of the vf devices to be run, which invoke
* register_netdevice which must have rtnl lock taken. As we are holding
* the lock right now, that could only work if the probe would not take
* the lock. However, as the probe of the vf may be called from other
* contexts as well (such as passthrough to vm fails) it can't assume
* the lock is being held for it. Using delayed work here allows the
* probe code to simply take the lock (i.e. wait for it to be released
* if it is being held).
*/
smp_mb__before_clear_bit();
set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state);
smp_mb__after_clear_bit();
schedule_delayed_work(&bp->sp_rtnl_task, 0);
return 0;
}
/* called with rtnl_lock */ /* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev) static int bnx2x_open(struct net_device *dev)
{ {
...@@ -12498,13 +12480,8 @@ static int bnx2x_init_one(struct pci_dev *pdev, ...@@ -12498,13 +12480,8 @@ static int bnx2x_init_one(struct pci_dev *pdev,
goto init_one_exit; goto init_one_exit;
} }
/* Enable SRIOV if capability found in configuration space. /* Enable SRIOV if capability found in configuration space */
* Once the generic SR-IOV framework makes it in from the rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS);
* pci tree this will be revised, to allow dynamic control
* over the number of VFs. Right now, change the num of vfs
* param below to enable SR-IOV.
*/
rc = bnx2x_iov_init_one(bp, int_mode, 0/*num vfs*/);
if (rc) if (rc)
goto init_one_exit; goto init_one_exit;
...@@ -12820,6 +12797,9 @@ static struct pci_driver bnx2x_pci_driver = { ...@@ -12820,6 +12797,9 @@ static struct pci_driver bnx2x_pci_driver = {
.suspend = bnx2x_suspend, .suspend = bnx2x_suspend,
.resume = bnx2x_resume, .resume = bnx2x_resume,
.err_handler = &bnx2x_err_handler, .err_handler = &bnx2x_err_handler,
#ifdef CONFIG_BNX2X_SRIOV
.sriov_configure = bnx2x_sriov_configure,
#endif
}; };
static int __init bnx2x_init(void) static int __init bnx2x_init(void)
......
...@@ -1467,7 +1467,6 @@ static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid) ...@@ -1467,7 +1467,6 @@ static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
return bnx2x_is_pcie_pending(dev); return bnx2x_is_pcie_pending(dev);
unknown_dev: unknown_dev:
BNX2X_ERR("Unknown device\n");
return false; return false;
} }
...@@ -1972,8 +1971,10 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, ...@@ -1972,8 +1971,10 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
if (iov->total == 0) if (iov->total == 0)
goto failed; goto failed;
/* calculate the actual number of VFs */ iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param);
iov->nr_virtfn = min_t(u16, iov->total, (u16)num_vfs_param);
DP(BNX2X_MSG_IOV, "num_vfs_param was %d, nr_virtfn was %d\n",
num_vfs_param, iov->nr_virtfn);
/* allocate the vf array */ /* allocate the vf array */
bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) * bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
...@@ -3020,21 +3021,47 @@ void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, ...@@ -3020,21 +3021,47 @@ void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
vf->op_current = CHANNEL_TLV_NONE; vf->op_current = CHANNEL_TLV_NONE;
} }
void bnx2x_enable_sriov(struct bnx2x *bp) int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
{ {
int rc = 0;
/* disable sriov in case it is still enabled */ struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));
pci_disable_sriov(bp->pdev);
DP(BNX2X_MSG_IOV, "sriov disabled\n");
/* enable sriov */ DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n",
DP(BNX2X_MSG_IOV, "vf num (%d)\n", (bp->vfdb->sriov.nr_virtfn)); num_vfs_param, BNX2X_NR_VIRTFN(bp));
rc = pci_enable_sriov(bp->pdev, (bp->vfdb->sriov.nr_virtfn));
if (rc) /* HW channel is only operational when PF is up */
if (bp->state != BNX2X_STATE_OPEN) {
BNX2X_ERR("VF num configuration via sysfs not supported while PF is down");
return -EINVAL;
}
/* we are always bound by the total_vfs in the configuration space */
if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) {
BNX2X_ERR("truncating requested number of VFs (%d) down to maximum allowed (%d)\n",
num_vfs_param, BNX2X_NR_VIRTFN(bp));
num_vfs_param = BNX2X_NR_VIRTFN(bp);
}
bp->requested_nr_virtfn = num_vfs_param;
if (num_vfs_param == 0) {
pci_disable_sriov(dev);
return 0;
} else {
return bnx2x_enable_sriov(bp);
}
}
int bnx2x_enable_sriov(struct bnx2x *bp)
{
int rc = 0, req_vfs = bp->requested_nr_virtfn;
rc = pci_enable_sriov(bp->pdev, req_vfs);
if (rc) {
BNX2X_ERR("pci_enable_sriov failed with %d\n", rc); BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
else return rc;
DP(BNX2X_MSG_IOV, "sriov enabled\n"); }
DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs);
return req_vfs;
} }
void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp)
...@@ -3050,6 +3077,11 @@ void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) ...@@ -3050,6 +3077,11 @@ void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp)
} }
} }
void bnx2x_disable_sriov(struct bnx2x *bp)
{
pci_disable_sriov(bp->pdev);
}
static int bnx2x_vf_ndo_sanity(struct bnx2x *bp, int vfidx, static int bnx2x_vf_ndo_sanity(struct bnx2x *bp, int vfidx,
struct bnx2x_virtf *vf) struct bnx2x_virtf *vf)
{ {
...@@ -3087,6 +3119,10 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx, ...@@ -3087,6 +3119,10 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf); rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
if (rc) if (rc)
return rc; return rc;
if (!mac_obj || !vlan_obj || !bulletin) {
BNX2X_ERR("VF partially initialized\n");
return -EINVAL;
}
ivi->vf = vfidx; ivi->vf = vfidx;
ivi->qos = 0; ivi->qos = 0;
...@@ -3405,3 +3441,26 @@ int bnx2x_vf_pci_alloc(struct bnx2x *bp) ...@@ -3405,3 +3441,26 @@ int bnx2x_vf_pci_alloc(struct bnx2x *bp)
sizeof(union pf_vf_bulletin)); sizeof(union pf_vf_bulletin));
return -ENOMEM; return -ENOMEM;
} }
int bnx2x_open_epilog(struct bnx2x *bp)
{
/* Enable sriov via delayed work. This must be done via delayed work
* because it causes the probe of the vf devices to be run, which invoke
* register_netdevice which must have rtnl lock taken. As we are holding
* the lock right now, that could only work if the probe would not take
* the lock. However, as the probe of the vf may be called from other
 * contexts as well (such as passthrough to vm fails) it can't assume
* the lock is being held for it. Using delayed work here allows the
* probe code to simply take the lock (i.e. wait for it to be released
* if it is being held). We only want to do this if the number of VFs
* was set before PF driver was loaded.
*/
if (IS_SRIOV(bp) && BNX2X_NR_VIRTFN(bp)) {
smp_mb__before_clear_bit();
set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state);
smp_mb__after_clear_bit();
schedule_delayed_work(&bp->sp_rtnl_task, 0);
}
return 0;
}
...@@ -753,12 +753,15 @@ static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp, ...@@ -753,12 +753,15 @@ static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp,
enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp); enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp);
void bnx2x_vf_map_doorbells(struct bnx2x *bp); void bnx2x_vf_map_doorbells(struct bnx2x *bp);
int bnx2x_vf_pci_alloc(struct bnx2x *bp); int bnx2x_vf_pci_alloc(struct bnx2x *bp);
void bnx2x_enable_sriov(struct bnx2x *bp); int bnx2x_enable_sriov(struct bnx2x *bp);
void bnx2x_disable_sriov(struct bnx2x *bp);
static inline int bnx2x_vf_headroom(struct bnx2x *bp) static inline int bnx2x_vf_headroom(struct bnx2x *bp)
{ {
return bp->vfdb->sriov.nr_virtfn * BNX2X_CLIENTS_PER_VF; return bp->vfdb->sriov.nr_virtfn * BNX2X_CLIENTS_PER_VF;
} }
void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp); void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp);
int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs);
int bnx2x_open_epilog(struct bnx2x *bp);
#else /* CONFIG_BNX2X_SRIOV */ #else /* CONFIG_BNX2X_SRIOV */
...@@ -781,7 +784,8 @@ static inline void bnx2x_iov_init_dmae(struct bnx2x *bp) {} ...@@ -781,7 +784,8 @@ static inline void bnx2x_iov_init_dmae(struct bnx2x *bp) {}
static inline int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, static inline int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
int num_vfs_param) {return 0; } int num_vfs_param) {return 0; }
static inline void bnx2x_iov_remove_one(struct bnx2x *bp) {} static inline void bnx2x_iov_remove_one(struct bnx2x *bp) {}
static inline void bnx2x_enable_sriov(struct bnx2x *bp) {} static inline int bnx2x_enable_sriov(struct bnx2x *bp) {return 0; }
static inline void bnx2x_disable_sriov(struct bnx2x *bp) {}
static inline int bnx2x_vfpf_acquire(struct bnx2x *bp, static inline int bnx2x_vfpf_acquire(struct bnx2x *bp,
u8 tx_count, u8 rx_count) {return 0; } u8 tx_count, u8 rx_count) {return 0; }
static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; } static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; }
...@@ -807,6 +811,8 @@ static inline enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp ...@@ -807,6 +811,8 @@ static inline enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp
static inline int bnx2x_vf_map_doorbells(struct bnx2x *bp) {return 0; } static inline int bnx2x_vf_map_doorbells(struct bnx2x *bp) {return 0; }
static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; } static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; }
static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {} static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {}
static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; }
static inline int bnx2x_open_epilog(struct bnx2x *bp) {return 0; }
#endif /* CONFIG_BNX2X_SRIOV */ #endif /* CONFIG_BNX2X_SRIOV */
#endif /* bnx2x_sriov.h */ #endif /* bnx2x_sriov.h */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment