Commit e3219ce6 authored by Anjali Singhai Jain, committed by Doug Ledford

i40e: Add support for client interface for IWARP driver

This patch adds a client interface for i40iw driver
support. It also expands the virtual channel to support messages
from the i40evf driver on behalf of the i40iwvf driver.

This client API is used by the i40iw and i40iwvf drivers
to access the core driver resources brokered by the i40e driver.
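
As a rough illustration only (not part of this patch), an RDMA client
such as i40iw might register with the new interface along the lines of
the sketch below; the i40iw_* names and the callback bodies are
placeholders:

	/* illustrative sketch built on the i40e_client API added below */
	#include "i40e_client.h"

	static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
	{
		/* set up RDMA resources using ldev->msix_entries, ldev->params */
		return 0;
	}

	static void i40iw_close(struct i40e_info *ldev, struct i40e_client *client,
				bool reset)
	{
		/* tear down RDMA resources; 'reset' is true if a reset is pending */
	}

	static struct i40e_client_ops i40iw_ops = {
		.open  = i40iw_open,
		.close = i40iw_close,
	};

	static struct i40e_client i40iw_client = {
		.name    = "i40iw",
		.version = { .major = I40E_CLIENT_VERSION_MAJOR,
			     .minor = I40E_CLIENT_VERSION_MINOR,
			     .build = I40E_CLIENT_VERSION_BUILD },
		.type    = I40E_CLIENT_IWARP,
		.ops     = &i40iw_ops,
	};

	/* module init:  i40e_register_client(&i40iw_client);   */
	/* module exit:  i40e_unregister_client(&i40iw_client); */
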
Signed-off-by: Anjali Singhai Jain <anjali.singhai@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent fc77dbd3
......@@ -41,6 +41,7 @@ i40e-objs := i40e_main.o \
i40e_diag.o \
i40e_txrx.o \
i40e_ptp.o \
i40e_client.o \
i40e_virtchnl_pf.o
i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o
......
......@@ -58,6 +58,7 @@
#ifdef I40E_FCOE
#include "i40e_fcoe.h"
#endif
#include "i40e_client.h"
#include "i40e_virtchnl.h"
#include "i40e_virtchnl_pf.h"
#include "i40e_txrx.h"
......@@ -178,6 +179,7 @@ struct i40e_lump_tracking {
u16 search_hint;
u16 list[0];
#define I40E_PILE_VALID_BIT 0x8000
#define I40E_IWARP_IRQ_PILE_ID (I40E_PILE_VALID_BIT - 2)
};
#define I40E_DEFAULT_ATR_SAMPLE_RATE 20
......@@ -270,6 +272,8 @@ struct i40e_pf {
#endif /* I40E_FCOE */
u16 num_lan_qps; /* num lan queues this PF has set up */
u16 num_lan_msix; /* num queue vectors for the base PF vsi */
u16 num_iwarp_msix; /* num of iwarp vectors for this PF */
int iwarp_base_vector;
int queues_left; /* queues left unclaimed */
u16 alloc_rss_size; /* allocated RSS queues */
u16 rss_size_max; /* HW defined max RSS queues */
......@@ -317,6 +321,7 @@ struct i40e_pf {
#define I40E_FLAG_16BYTE_RX_DESC_ENABLED BIT_ULL(13)
#define I40E_FLAG_CLEAN_ADMINQ BIT_ULL(14)
#define I40E_FLAG_FILTER_SYNC BIT_ULL(15)
#define I40E_FLAG_SERVICE_CLIENT_REQUESTED BIT_ULL(16)
#define I40E_FLAG_PROCESS_MDD_EVENT BIT_ULL(17)
#define I40E_FLAG_PROCESS_VFLR_EVENT BIT_ULL(18)
#define I40E_FLAG_SRIOV_ENABLED BIT_ULL(19)
......@@ -557,6 +562,8 @@ struct i40e_vsi {
struct kobject *kobj; /* sysfs object */
bool current_isup; /* Sync 'link up' logging */
void *priv; /* client driver data reference. */
/* VSI specific handlers */
irqreturn_t (*irq_handler)(int irq, void *data);
......@@ -714,6 +721,10 @@ void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
struct i40e_vsi_context *ctxt,
u8 enabled_tc, bool is_add);
#endif
void i40e_service_event_schedule(struct i40e_pf *pf);
void i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id,
u8 *msg, u16 len);
int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool enable);
int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count);
struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid,
......@@ -736,6 +747,17 @@ static inline void i40e_dbg_pf_exit(struct i40e_pf *pf) {}
static inline void i40e_dbg_init(void) {}
static inline void i40e_dbg_exit(void) {}
#endif /* CONFIG_DEBUG_FS*/
/* needed by client drivers */
int i40e_lan_add_device(struct i40e_pf *pf);
int i40e_lan_del_device(struct i40e_pf *pf);
void i40e_client_subtask(struct i40e_pf *pf);
void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi);
void i40e_notify_client_of_netdev_open(struct i40e_vsi *vsi);
void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset);
void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs);
void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id);
int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id,
enum i40e_client_type type);
/**
* i40e_irq_dynamic_enable - Enable default interrupt generation settings
* @vsi: pointer to a vsi
......
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
* Copyright(c) 2013 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
******************************************************************************/
#include <linux/list.h>
#include <linux/errno.h>
#include "i40e.h"
#include "i40e_prototype.h"
#include "i40e_client.h"
static const char i40e_client_interface_version_str[] = I40E_CLIENT_VERSION_STR;
static LIST_HEAD(i40e_devices);
static DEFINE_MUTEX(i40e_device_mutex);
static LIST_HEAD(i40e_clients);
static DEFINE_MUTEX(i40e_client_mutex);
static LIST_HEAD(i40e_client_instances);
static DEFINE_MUTEX(i40e_client_instance_mutex);
static int i40e_client_virtchnl_send(struct i40e_info *ldev,
struct i40e_client *client,
u32 vf_id, u8 *msg, u16 len);
static int i40e_client_setup_qvlist(struct i40e_info *ldev,
struct i40e_client *client,
struct i40e_qvlist_info *qvlist_info);
static void i40e_client_request_reset(struct i40e_info *ldev,
struct i40e_client *client,
u32 reset_level);
static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
struct i40e_client *client,
bool is_vf, u32 vf_id,
u32 flag, u32 valid_flag);
static struct i40e_ops i40e_lan_ops = {
.virtchnl_send = i40e_client_virtchnl_send,
.setup_qvlist = i40e_client_setup_qvlist,
.request_reset = i40e_client_request_reset,
.update_vsi_ctxt = i40e_client_update_vsi_ctxt,
};
/**
* i40e_client_type_to_vsi_type - convert client type to vsi type
* @client_type: the i40e_client type
*
* returns the related vsi type value
**/
static
enum i40e_vsi_type i40e_client_type_to_vsi_type(enum i40e_client_type type)
{
switch (type) {
case I40E_CLIENT_IWARP:
return I40E_VSI_IWARP;
case I40E_CLIENT_VMDQ2:
return I40E_VSI_VMDQ2;
default:
pr_err("i40e: Client type unknown\n");
return I40E_VSI_TYPE_UNKNOWN;
}
}
/**
* i40e_client_get_params - Get the params that can change at runtime
* @vsi: the VSI with the message
* @params: client param struct
*
**/
static
int i40e_client_get_params(struct i40e_vsi *vsi, struct i40e_params *params)
{
struct i40e_dcbx_config *dcb_cfg = &vsi->back->hw.local_dcbx_config;
int i = 0;
for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
u8 tc = dcb_cfg->etscfg.prioritytable[i];
u16 qs_handle;
/* If TC is not enabled for VSI use TC0 for UP */
if (!(vsi->tc_config.enabled_tc & BIT(tc)))
tc = 0;
qs_handle = le16_to_cpu(vsi->info.qs_handle[tc]);
params->qos.prio_qos[i].tc = tc;
params->qos.prio_qos[i].qs_handle = qs_handle;
if (qs_handle == I40E_AQ_VSI_QS_HANDLE_INVALID) {
dev_err(&vsi->back->pdev->dev, "Invalid queue set handle for TC = %d, vsi id = %d\n",
tc, vsi->id);
return -EINVAL;
}
}
params->mtu = vsi->netdev->mtu;
return 0;
}
/**
* i40e_notify_client_of_vf_msg - call the client vf message callback
* @vsi: the VSI with the message
* @vf_id: the absolute VF id that sent the message
* @msg: message buffer
* @len: length of the message
*
* If there is a client to this VSI, call the client
**/
void
i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id, u8 *msg, u16 len)
{
struct i40e_client_instance *cdev;
if (!vsi)
return;
mutex_lock(&i40e_client_instance_mutex);
list_for_each_entry(cdev, &i40e_client_instances, list) {
if (cdev->lan_info.pf == vsi->back) {
if (!cdev->client ||
!cdev->client->ops ||
!cdev->client->ops->virtchnl_receive) {
dev_dbg(&vsi->back->pdev->dev,
"Cannot locate client instance virtual channel receive routine\n");
continue;
}
cdev->client->ops->virtchnl_receive(&cdev->lan_info,
cdev->client,
vf_id, msg, len);
}
}
mutex_unlock(&i40e_client_instance_mutex);
}
/**
* i40e_notify_client_of_l2_param_changes - call the client notify callback
* @vsi: the VSI with l2 param changes
*
* If there is a client to this VSI, call the client
**/
void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi)
{
struct i40e_client_instance *cdev;
struct i40e_params params;
if (!vsi)
return;
memset(&params, 0, sizeof(params));
i40e_client_get_params(vsi, &params);
mutex_lock(&i40e_client_instance_mutex);
list_for_each_entry(cdev, &i40e_client_instances, list) {
if (cdev->lan_info.pf == vsi->back) {
if (!cdev->client ||
!cdev->client->ops ||
!cdev->client->ops->l2_param_change) {
dev_dbg(&vsi->back->pdev->dev,
"Cannot locate client instance l2_param_change routine\n");
continue;
}
cdev->lan_info.params = params;
cdev->client->ops->l2_param_change(&cdev->lan_info,
cdev->client,
&params);
}
}
mutex_unlock(&i40e_client_instance_mutex);
}
/**
* i40e_notify_client_of_netdev_open - call the client open callback
* @vsi: the VSI with netdev opened
*
* If there is a client to this netdev, call the client with open
**/
void i40e_notify_client_of_netdev_open(struct i40e_vsi *vsi)
{
struct i40e_client_instance *cdev;
if (!vsi)
return;
mutex_lock(&i40e_client_instance_mutex);
list_for_each_entry(cdev, &i40e_client_instances, list) {
if (cdev->lan_info.netdev == vsi->netdev) {
if (!cdev->client ||
!cdev->client->ops || !cdev->client->ops->open) {
dev_dbg(&vsi->back->pdev->dev,
"Cannot locate client instance open routine\n");
continue;
}
cdev->client->ops->open(&cdev->lan_info, cdev->client);
}
}
mutex_unlock(&i40e_client_instance_mutex);
}
/**
* i40e_client_release_qvlist
* @ldev: pointer to L2 context.
*
**/
static void i40e_client_release_qvlist(struct i40e_info *ldev)
{
struct i40e_qvlist_info *qvlist_info = ldev->qvlist_info;
u32 i;
if (!ldev->qvlist_info)
return;
for (i = 0; i < qvlist_info->num_vectors; i++) {
struct i40e_pf *pf = ldev->pf;
struct i40e_qv_info *qv_info;
u32 reg_idx;
qv_info = &qvlist_info->qv_info[i];
if (!qv_info)
continue;
reg_idx = I40E_PFINT_LNKLSTN(qv_info->v_idx - 1);
wr32(&pf->hw, reg_idx, I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
}
kfree(ldev->qvlist_info);
ldev->qvlist_info = NULL;
}
/**
* i40e_notify_client_of_netdev_close - call the client close callback
* @vsi: the VSI with netdev closed
* @reset: true when close called due to a reset pending
*
* If there is a client to this netdev, call the client with close
**/
void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset)
{
struct i40e_client_instance *cdev;
if (!vsi)
return;
mutex_lock(&i40e_client_instance_mutex);
list_for_each_entry(cdev, &i40e_client_instances, list) {
if (cdev->lan_info.netdev == vsi->netdev) {
if (!cdev->client ||
!cdev->client->ops || !cdev->client->ops->close) {
dev_dbg(&vsi->back->pdev->dev,
"Cannot locate client instance close routine\n");
continue;
}
cdev->client->ops->close(&cdev->lan_info, cdev->client,
reset);
i40e_client_release_qvlist(&cdev->lan_info);
}
}
mutex_unlock(&i40e_client_instance_mutex);
}
/**
* i40e_notify_client_of_vf_reset - call the client vf reset callback
* @pf: PF device pointer
* @vf_id: absolute id of the VF being reset
*
* If there is a client attached to this PF, notify when a VF is reset
**/
void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id)
{
struct i40e_client_instance *cdev;
if (!pf)
return;
mutex_lock(&i40e_client_instance_mutex);
list_for_each_entry(cdev, &i40e_client_instances, list) {
if (cdev->lan_info.pf == pf) {
if (!cdev->client ||
!cdev->client->ops ||
!cdev->client->ops->vf_reset) {
dev_dbg(&pf->pdev->dev,
"Cannot locate client instance VF reset routine\n");
continue;
}
cdev->client->ops->vf_reset(&cdev->lan_info,
cdev->client, vf_id);
}
}
mutex_unlock(&i40e_client_instance_mutex);
}
/**
* i40e_notify_client_of_vf_enable - call the client vf notification callback
* @pf: PF device pointer
* @num_vfs: the number of VFs currently enabled, 0 for disable
*
* If there is a client attached to this PF, call its VF notification routine
**/
void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs)
{
struct i40e_client_instance *cdev;
if (!pf)
return;
mutex_lock(&i40e_client_instance_mutex);
list_for_each_entry(cdev, &i40e_client_instances, list) {
if (cdev->lan_info.pf == pf) {
if (!cdev->client ||
!cdev->client->ops ||
!cdev->client->ops->vf_enable) {
dev_dbg(&pf->pdev->dev,
"Cannot locate client instance VF enable routine\n");
continue;
}
cdev->client->ops->vf_enable(&cdev->lan_info,
cdev->client, num_vfs);
}
}
mutex_unlock(&i40e_client_instance_mutex);
}
/**
* i40e_vf_client_capable - ask the client if it likes the specified VF
* @pf: PF device pointer
* @vf_id: the VF in question
*
* If there is a client of the specified type attached to this PF, call
* its vf_capable routine
**/
int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id,
enum i40e_client_type type)
{
struct i40e_client_instance *cdev;
int capable = false;
if (!pf)
return false;
mutex_lock(&i40e_client_instance_mutex);
list_for_each_entry(cdev, &i40e_client_instances, list) {
if (cdev->lan_info.pf == pf) {
if (!cdev->client ||
!cdev->client->ops ||
!cdev->client->ops->vf_capable ||
!(cdev->client->type == type)) {
dev_dbg(&pf->pdev->dev,
"Cannot locate client instance VF capability routine\n");
continue;
}
capable = cdev->client->ops->vf_capable(&cdev->lan_info,
cdev->client,
vf_id);
break;
}
}
mutex_unlock(&i40e_client_instance_mutex);
return capable;
}
/**
* i40e_vsi_lookup - finds a matching VSI from the PF list starting at start_vsi
* @pf: board private structure
* @type: vsi type
* @start_vsi: a VSI pointer from where to start the search
*
* Returns a non-NULL VSI on success or NULL on failure
**/
struct i40e_vsi *i40e_vsi_lookup(struct i40e_pf *pf,
enum i40e_vsi_type type,
struct i40e_vsi *start_vsi)
{
struct i40e_vsi *vsi;
int i = 0;
if (start_vsi) {
for (i = 0; i < pf->num_alloc_vsi; i++) {
vsi = pf->vsi[i];
if (vsi == start_vsi)
break;
}
}
for (; i < pf->num_alloc_vsi; i++) {
vsi = pf->vsi[i];
if (vsi && vsi->type == type)
return vsi;
}
return NULL;
}
/**
* i40e_client_add_instance - add a client instance struct to the instance list
* @pf: pointer to the board struct
* @client: pointer to a client struct in the client list.
*
* Returns cdev ptr on success, NULL on failure
**/
static
struct i40e_client_instance *i40e_client_add_instance(struct i40e_pf *pf,
struct i40e_client *client)
{
struct i40e_client_instance *cdev;
struct netdev_hw_addr *mac = NULL;
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
mutex_lock(&i40e_client_instance_mutex);
list_for_each_entry(cdev, &i40e_client_instances, list) {
if ((cdev->lan_info.pf == pf) && (cdev->client == client)) {
cdev = NULL;
goto out;
}
}
cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
if (!cdev)
goto out;
cdev->lan_info.pf = (void *)pf;
cdev->lan_info.netdev = vsi->netdev;
cdev->lan_info.pcidev = pf->pdev;
cdev->lan_info.fid = pf->hw.pf_id;
cdev->lan_info.ftype = I40E_CLIENT_FTYPE_PF;
cdev->lan_info.hw_addr = pf->hw.hw_addr;
cdev->lan_info.ops = &i40e_lan_ops;
cdev->lan_info.version.major = I40E_CLIENT_VERSION_MAJOR;
cdev->lan_info.version.minor = I40E_CLIENT_VERSION_MINOR;
cdev->lan_info.version.build = I40E_CLIENT_VERSION_BUILD;
cdev->lan_info.fw_maj_ver = pf->hw.aq.fw_maj_ver;
cdev->lan_info.fw_min_ver = pf->hw.aq.fw_min_ver;
cdev->lan_info.fw_build = pf->hw.aq.fw_build;
set_bit(__I40E_CLIENT_INSTANCE_NONE, &cdev->state);
if (i40e_client_get_params(vsi, &cdev->lan_info.params)) {
kfree(cdev);
cdev = NULL;
goto out;
}
cdev->lan_info.msix_count = pf->num_iwarp_msix;
cdev->lan_info.msix_entries = &pf->msix_entries[pf->iwarp_base_vector];
mac = list_first_entry(&cdev->lan_info.netdev->dev_addrs.list,
struct netdev_hw_addr, list);
if (mac)
ether_addr_copy(cdev->lan_info.lanmac, mac->addr);
else
dev_err(&pf->pdev->dev, "MAC address list is empty!\n");
cdev->client = client;
INIT_LIST_HEAD(&cdev->list);
list_add(&cdev->list, &i40e_client_instances);
out:
mutex_unlock(&i40e_client_instance_mutex);
return cdev;
}
/**
* i40e_client_del_instance - removes a client instance from the list
* @pf: pointer to the board struct
*
* Returns 0 on success or non-0 on error
**/
static
int i40e_client_del_instance(struct i40e_pf *pf, struct i40e_client *client)
{
struct i40e_client_instance *cdev, *tmp;
int ret = -ENODEV;
mutex_lock(&i40e_client_instance_mutex);
list_for_each_entry_safe(cdev, tmp, &i40e_client_instances, list) {
if ((cdev->lan_info.pf != pf) || (cdev->client != client))
continue;
dev_info(&pf->pdev->dev, "Deleted instance of Client %s, of dev %d bus=0x%02x func=0x%02x)\n",
client->name, pf->hw.pf_id,
pf->hw.bus.device, pf->hw.bus.func);
list_del(&cdev->list);
kfree(cdev);
ret = 0;
break;
}
mutex_unlock(&i40e_client_instance_mutex);
return ret;
}
/**
* i40e_client_subtask - client maintenance work
* @pf: board private structure
**/
void i40e_client_subtask(struct i40e_pf *pf)
{
struct i40e_client_instance *cdev;
struct i40e_client *client;
int ret = 0;
if (!(pf->flags & I40E_FLAG_SERVICE_CLIENT_REQUESTED))
return;
pf->flags &= ~I40E_FLAG_SERVICE_CLIENT_REQUESTED;
/* If we're down or resetting, just bail */
if (test_bit(__I40E_DOWN, &pf->state) ||
test_bit(__I40E_CONFIG_BUSY, &pf->state))
return;
/* Check client state and instantiate client if client registered */
mutex_lock(&i40e_client_mutex);
list_for_each_entry(client, &i40e_clients, list) {
/* first check client is registered */
if (!test_bit(__I40E_CLIENT_REGISTERED, &client->state))
continue;
/* Do we also need the LAN VSI to be up to create the instance? */
if (!(client->flags & I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE)) {
/* check if L2 VSI is up, if not we are not ready */
if (test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state))
continue;
}
/* Add the client instance to the instance list */
cdev = i40e_client_add_instance(pf, client);
if (!cdev)
continue;
/* Also up the ref_cnt of no. of instances of this client */
atomic_inc(&client->ref_cnt);
dev_info(&pf->pdev->dev, "Added instance of Client %s to PF%d bus=0x%02x func=0x%02x\n",
client->name, pf->hw.pf_id,
pf->hw.bus.device, pf->hw.bus.func);
/* Send an Open request to the client */
atomic_inc(&cdev->ref_cnt);
if (client->ops && client->ops->open)
ret = client->ops->open(&cdev->lan_info, client);
atomic_dec(&cdev->ref_cnt);
if (!ret) {
set_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
} else {
/* remove client instance */
i40e_client_del_instance(pf, client);
atomic_dec(&client->ref_cnt);
continue;
}
}
mutex_unlock(&i40e_client_mutex);
}
/**
* i40e_lan_add_device - add a lan device struct to the list of lan devices
* @pf: pointer to the board struct
*
* Returns 0 on success or non-0 on error
**/
int i40e_lan_add_device(struct i40e_pf *pf)
{
struct i40e_device *ldev;
int ret = 0;
mutex_lock(&i40e_device_mutex);
list_for_each_entry(ldev, &i40e_devices, list) {
if (ldev->pf == pf) {
ret = -EEXIST;
goto out;
}
}
ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
if (!ldev) {
ret = -ENOMEM;
goto out;
}
ldev->pf = pf;
INIT_LIST_HEAD(&ldev->list);
list_add(&ldev->list, &i40e_devices);
dev_info(&pf->pdev->dev, "Added LAN device PF%d bus=0x%02x func=0x%02x\n",
pf->hw.pf_id, pf->hw.bus.device, pf->hw.bus.func);
/* Since in some cases register may have happened before a device gets
* added, we can schedule a subtask to go initiate the clients.
*/
pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
i40e_service_event_schedule(pf);
out:
mutex_unlock(&i40e_device_mutex);
return ret;
}
/**
* i40e_lan_del_device - removes a lan device from the device list
* @pf: pointer to the board struct
*
* Returns 0 on success or non-0 on error
**/
int i40e_lan_del_device(struct i40e_pf *pf)
{
struct i40e_device *ldev, *tmp;
int ret = -ENODEV;
mutex_lock(&i40e_device_mutex);
list_for_each_entry_safe(ldev, tmp, &i40e_devices, list) {
if (ldev->pf == pf) {
dev_info(&pf->pdev->dev, "Deleted LAN device PF%d bus=0x%02x func=0x%02x\n",
pf->hw.pf_id, pf->hw.bus.device,
pf->hw.bus.func);
list_del(&ldev->list);
kfree(ldev);
ret = 0;
break;
}
}
mutex_unlock(&i40e_device_mutex);
return ret;
}
/**
* i40e_client_release - release client specific resources
* @client: pointer to the registered client
*
* Return 0 on success or < 0 on error
**/
static int i40e_client_release(struct i40e_client *client)
{
struct i40e_client_instance *cdev, *tmp;
struct i40e_pf *pf = NULL;
int ret = 0;
LIST_HEAD(cdevs_tmp);
mutex_lock(&i40e_client_instance_mutex);
list_for_each_entry_safe(cdev, tmp, &i40e_client_instances, list) {
if (strncmp(cdev->client->name, client->name,
I40E_CLIENT_STR_LENGTH))
continue;
if (test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
if (atomic_read(&cdev->ref_cnt) > 0) {
ret = I40E_ERR_NOT_READY;
goto out;
}
pf = (struct i40e_pf *)cdev->lan_info.pf;
if (client->ops && client->ops->close)
client->ops->close(&cdev->lan_info, client,
false);
i40e_client_release_qvlist(&cdev->lan_info);
clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
dev_warn(&pf->pdev->dev,
"Client %s instance for PF id %d closed\n",
client->name, pf->hw.pf_id);
}
/* delete the client instance from the list */
list_del(&cdev->list);
list_add(&cdev->list, &cdevs_tmp);
atomic_dec(&client->ref_cnt);
dev_info(&pf->pdev->dev, "Deleted client instance of Client %s\n",
client->name);
}
out:
mutex_unlock(&i40e_client_instance_mutex);
/* free the client device and release its vsi */
list_for_each_entry_safe(cdev, tmp, &cdevs_tmp, list) {
kfree(cdev);
}
return ret;
}
/**
* i40e_client_prepare - prepare client specific resources
* @client: pointer to the registered client
*
* Return 0 on success or < 0 on error
**/
static int i40e_client_prepare(struct i40e_client *client)
{
struct i40e_device *ldev;
struct i40e_pf *pf;
int ret = 0;
mutex_lock(&i40e_device_mutex);
list_for_each_entry(ldev, &i40e_devices, list) {
pf = ldev->pf;
/* Start the client subtask */
pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
i40e_service_event_schedule(pf);
}
mutex_unlock(&i40e_device_mutex);
return ret;
}
/**
* i40e_client_virtchnl_send - send a message to a VF over the virtual channel
* @ldev: pointer to L2 context
* @client: Client pointer
* @vf_id: absolute VF identifier
* @msg: message buffer
* @len: length of message buffer
*
* Return 0 on success or < 0 on error
**/
static int i40e_client_virtchnl_send(struct i40e_info *ldev,
struct i40e_client *client,
u32 vf_id, u8 *msg, u16 len)
{
struct i40e_pf *pf = ldev->pf;
struct i40e_hw *hw = &pf->hw;
i40e_status err;
err = i40e_aq_send_msg_to_vf(hw, vf_id, I40E_VIRTCHNL_OP_IWARP,
0, msg, len, NULL);
if (err)
dev_err(&pf->pdev->dev, "Unable to send iWarp message to VF, error %d, aq status %d\n",
err, hw->aq.asq_last_status);
return err;
}
/**
* i40e_client_setup_qvlist
* @ldev: pointer to L2 context.
* @client: Client pointer.
* @qvlist_info: queue and vector list
*
* Return 0 on success or < 0 on error
**/
static int i40e_client_setup_qvlist(struct i40e_info *ldev,
struct i40e_client *client,
struct i40e_qvlist_info *qvlist_info)
{
struct i40e_pf *pf = ldev->pf;
struct i40e_hw *hw = &pf->hw;
struct i40e_qv_info *qv_info;
u32 v_idx, i, reg_idx, reg;
u32 size;
size = sizeof(struct i40e_qvlist_info) +
(sizeof(struct i40e_qv_info) * (qvlist_info->num_vectors - 1));
ldev->qvlist_info = kzalloc(size, GFP_KERNEL);
ldev->qvlist_info->num_vectors = qvlist_info->num_vectors;
for (i = 0; i < qvlist_info->num_vectors; i++) {
qv_info = &qvlist_info->qv_info[i];
if (!qv_info)
continue;
v_idx = qv_info->v_idx;
/* Validate vector id belongs to this client */
if ((v_idx >= (pf->iwarp_base_vector + pf->num_iwarp_msix)) ||
(v_idx < pf->iwarp_base_vector))
goto err;
ldev->qvlist_info->qv_info[i] = *qv_info;
reg_idx = I40E_PFINT_LNKLSTN(v_idx - 1);
if (qv_info->ceq_idx == I40E_QUEUE_INVALID_IDX) {
/* Special case - No CEQ mapped on this vector */
wr32(hw, reg_idx, I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
} else {
reg = (qv_info->ceq_idx &
I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) |
(I40E_QUEUE_TYPE_PE_CEQ <<
I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
wr32(hw, reg_idx, reg);
reg = (I40E_PFINT_CEQCTL_CAUSE_ENA_MASK |
(v_idx << I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT) |
(qv_info->itr_idx <<
I40E_PFINT_CEQCTL_ITR_INDX_SHIFT) |
(I40E_QUEUE_END_OF_LIST <<
I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT));
wr32(hw, I40E_PFINT_CEQCTL(qv_info->ceq_idx), reg);
}
if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
reg = (I40E_PFINT_AEQCTL_CAUSE_ENA_MASK |
(v_idx << I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT) |
(qv_info->itr_idx <<
I40E_PFINT_AEQCTL_ITR_INDX_SHIFT));
wr32(hw, I40E_PFINT_AEQCTL, reg);
}
}
return 0;
err:
kfree(ldev->qvlist_info);
ldev->qvlist_info = NULL;
return -EINVAL;
}
/**
* i40e_client_request_reset
* @ldev: pointer to L2 context.
* @client: Client pointer.
* @level: reset level
**/
static void i40e_client_request_reset(struct i40e_info *ldev,
struct i40e_client *client,
u32 reset_level)
{
struct i40e_pf *pf = ldev->pf;
switch (reset_level) {
case I40E_CLIENT_RESET_LEVEL_PF:
set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
break;
case I40E_CLIENT_RESET_LEVEL_CORE:
set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
break;
default:
dev_warn(&pf->pdev->dev,
"Client %s instance for PF id %d request an unsupported reset: %d.\n",
client->name, pf->hw.pf_id, reset_level);
break;
}
i40e_service_event_schedule(pf);
}
/**
* i40e_client_update_vsi_ctxt
* @ldev: pointer to L2 context.
* @client: Client pointer.
* @is_vf: if this for the VF
* @vf_id: if is_vf true this carries the vf_id
* @flag: Any device level setting that needs to be done for PE
* @valid_flag: Bits in this match up and enable changing of flag bits
*
* Return 0 on success or < 0 on error
**/
static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
struct i40e_client *client,
bool is_vf, u32 vf_id,
u32 flag, u32 valid_flag)
{
struct i40e_pf *pf = ldev->pf;
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_vsi_context ctxt;
bool update = true;
i40e_status err;
/* TODO: for now do not allow setting VF's VSI setting */
if (is_vf)
return -EINVAL;
ctxt.seid = pf->main_vsi_seid;
ctxt.pf_num = pf->hw.pf_id;
err = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
ctxt.flags = I40E_AQ_VSI_TYPE_PF;
if (err) {
dev_info(&pf->pdev->dev,
"couldn't get PF vsi config, err %s aq_err %s\n",
i40e_stat_str(&pf->hw, err),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
return -ENOENT;
}
if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE) &&
(flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE)) {
ctxt.info.valid_sections =
cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
} else if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE) &&
!(flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE)) {
ctxt.info.valid_sections =
cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
ctxt.info.queueing_opt_flags &= ~I40E_AQ_VSI_QUE_OPT_TCP_ENA;
} else {
update = false;
dev_warn(&pf->pdev->dev,
"Client %s instance for PF id %d request an unsupported Config: %x.\n",
client->name, pf->hw.pf_id, flag);
}
if (update) {
err = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (err) {
dev_info(&pf->pdev->dev,
"update VSI ctxt for PE failed, err %s aq_err %s\n",
i40e_stat_str(&pf->hw, err),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
}
}
return err;
}
/**
* i40e_register_client - Register a i40e client driver with the L2 driver
* @client: pointer to the i40e_client struct
*
* Returns 0 on success or non-0 on error
**/
int i40e_register_client(struct i40e_client *client)
{
int ret = 0;
enum i40e_vsi_type vsi_type;
if (!client) {
ret = -EIO;
goto out;
}
if (strlen(client->name) == 0) {
pr_info("i40e: Failed to register client with no name\n");
ret = -EIO;
goto out;
}
mutex_lock(&i40e_client_mutex);
if (i40e_client_is_registered(client)) {
pr_info("i40e: Client %s has already been registered!\n",
client->name);
mutex_unlock(&i40e_client_mutex);
ret = -EEXIST;
goto out;
}
if ((client->version.major != I40E_CLIENT_VERSION_MAJOR) ||
(client->version.minor != I40E_CLIENT_VERSION_MINOR)) {
pr_info("i40e: Failed to register client %s due to mismatched client interface version\n",
client->name);
pr_info("Client is using version: %02d.%02d.%02d while LAN driver supports %s\n",
client->version.major, client->version.minor,
client->version.build,
i40e_client_interface_version_str);
mutex_unlock(&i40e_client_mutex);
ret = -EIO;
goto out;
}
vsi_type = i40e_client_type_to_vsi_type(client->type);
if (vsi_type == I40E_VSI_TYPE_UNKNOWN) {
pr_info("i40e: Failed to register client %s due to unknown client type %d\n",
client->name, client->type);
mutex_unlock(&i40e_client_mutex);
ret = -EIO;
goto out;
}
list_add(&client->list, &i40e_clients);
set_bit(__I40E_CLIENT_REGISTERED, &client->state);
mutex_unlock(&i40e_client_mutex);
if (i40e_client_prepare(client)) {
ret = -EIO;
goto out;
}
pr_info("i40e: Registered client %s with return code %d\n",
client->name, ret);
out:
return ret;
}
EXPORT_SYMBOL(i40e_register_client);
/**
* i40e_unregister_client - Unregister a i40e client driver with the L2 driver
* @client: pointer to the i40e_client struct
*
* Returns 0 on success or non-0 on error
**/
int i40e_unregister_client(struct i40e_client *client)
{
int ret = 0;
/* When a unregister request comes through we would have to send
* a close for each of the client instances that were opened.
* client_release function is called to handle this.
*/
if (!client || i40e_client_release(client)) {
ret = -EIO;
goto out;
}
/* TODO: check if device is in reset, or if that matters? */
mutex_lock(&i40e_client_mutex);
if (!i40e_client_is_registered(client)) {
pr_info("i40e: Client %s has not been registered\n",
client->name);
mutex_unlock(&i40e_client_mutex);
ret = -ENODEV;
goto out;
}
if (atomic_read(&client->ref_cnt) == 0) {
clear_bit(__I40E_CLIENT_REGISTERED, &client->state);
list_del(&client->list);
pr_info("i40e: Unregistered client %s with return code %d\n",
client->name, ret);
} else {
ret = I40E_ERR_NOT_READY;
pr_err("i40e: Client %s failed unregister - client has open instances\n",
client->name);
}
mutex_unlock(&i40e_client_mutex);
out:
return ret;
}
EXPORT_SYMBOL(i40e_unregister_client);
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
* Copyright(c) 2013 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
******************************************************************************/
#ifndef _I40E_CLIENT_H_
#define _I40E_CLIENT_H_
#define I40E_CLIENT_STR_LENGTH 10
/* Client interface version should be updated anytime there is a change in the
* existing APIs or data structures.
*/
#define I40E_CLIENT_VERSION_MAJOR 0
#define I40E_CLIENT_VERSION_MINOR 01
#define I40E_CLIENT_VERSION_BUILD 00
#define I40E_CLIENT_VERSION_STR \
XSTRINGIFY(I40E_CLIENT_VERSION_MAJOR) "." \
XSTRINGIFY(I40E_CLIENT_VERSION_MINOR) "." \
XSTRINGIFY(I40E_CLIENT_VERSION_BUILD)
struct i40e_client_version {
u8 major;
u8 minor;
u8 build;
u8 rsvd;
};
enum i40e_client_state {
__I40E_CLIENT_NULL,
__I40E_CLIENT_REGISTERED
};
enum i40e_client_instance_state {
__I40E_CLIENT_INSTANCE_NONE,
__I40E_CLIENT_INSTANCE_OPENED,
};
enum i40e_client_type {
I40E_CLIENT_IWARP,
I40E_CLIENT_VMDQ2
};
struct i40e_ops;
struct i40e_client;
/* HW does not define a type value for AEQ; only for RX/TX and CEQ.
* In order for us to keep the interface simple, SW will define a
* unique type value for AEQ.
*/
#define I40E_QUEUE_TYPE_PE_AEQ 0x80
#define I40E_QUEUE_INVALID_IDX 0xFFFF
struct i40e_qv_info {
u32 v_idx; /* msix_vector */
u16 ceq_idx;
u16 aeq_idx;
u8 itr_idx;
};
struct i40e_qvlist_info {
u32 num_vectors;
struct i40e_qv_info qv_info[1];
};
#define I40E_CLIENT_MSIX_ALL 0xFFFFFFFF
/* set of LAN parameters useful for clients managed by LAN */
/* Struct to hold per priority info */
struct i40e_prio_qos_params {
u16 qs_handle; /* qs handle for prio */
u8 tc; /* TC mapped to prio */
u8 reserved;
};
#define I40E_CLIENT_MAX_USER_PRIORITY 8
/* Struct to hold Client QoS */
struct i40e_qos_params {
struct i40e_prio_qos_params prio_qos[I40E_CLIENT_MAX_USER_PRIORITY];
};
struct i40e_params {
struct i40e_qos_params qos;
u16 mtu;
};
/* Structure to hold Lan device info for a client device */
struct i40e_info {
struct i40e_client_version version;
u8 lanmac[6];
struct net_device *netdev;
struct pci_dev *pcidev;
u8 __iomem *hw_addr;
u8 fid; /* function id, PF id or VF id */
#define I40E_CLIENT_FTYPE_PF 0
#define I40E_CLIENT_FTYPE_VF 1
u8 ftype; /* function type, PF or VF */
void *pf;
/* All L2 params that could change during the life span of the PF
* and needs to be communicated to the client when they change
*/
struct i40e_qvlist_info *qvlist_info;
struct i40e_params params;
struct i40e_ops *ops;
u16 msix_count; /* number of msix vectors*/
/* Array down below will be dynamically allocated based on msix_count */
struct msix_entry *msix_entries;
u16 itr_index; /* Which ITR index the PE driver is supposed to use */
u16 fw_maj_ver; /* firmware major version */
u16 fw_min_ver; /* firmware minor version */
u32 fw_build; /* firmware build number */
};
#define I40E_CLIENT_RESET_LEVEL_PF 1
#define I40E_CLIENT_RESET_LEVEL_CORE 2
#define I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE BIT(1)
struct i40e_ops {
/* setup_q_vector_list enables queues with a particular vector */
int (*setup_qvlist)(struct i40e_info *ldev, struct i40e_client *client,
struct i40e_qvlist_info *qv_info);
int (*virtchnl_send)(struct i40e_info *ldev, struct i40e_client *client,
u32 vf_id, u8 *msg, u16 len);
/* If the PE Engine is unresponsive, RDMA driver can request a reset.
* The level helps determine the level of reset being requested.
*/
void (*request_reset)(struct i40e_info *ldev,
struct i40e_client *client, u32 level);
/* API for the RDMA driver to set certain VSI flags that control
* PE Engine.
*/
int (*update_vsi_ctxt)(struct i40e_info *ldev,
struct i40e_client *client,
bool is_vf, u32 vf_id,
u32 flag, u32 valid_flag);
};
struct i40e_client_ops {
/* Should be called from register_client() or whenever PF is ready
* to create a specific client instance.
*/
int (*open)(struct i40e_info *ldev, struct i40e_client *client);
/* Should be called when netdev is unavailable or when unregister
* call comes in. If the close is happening due to a reset being
* triggered set the reset bit to true.
*/
void (*close)(struct i40e_info *ldev, struct i40e_client *client,
bool reset);
/* called when an L2-managed parameter changes, e.g. MTU */
void (*l2_param_change)(struct i40e_info *ldev,
struct i40e_client *client,
struct i40e_params *params);
int (*virtchnl_receive)(struct i40e_info *ldev,
struct i40e_client *client, u32 vf_id,
u8 *msg, u16 len);
/* called when a VF is reset by the PF */
void (*vf_reset)(struct i40e_info *ldev,
struct i40e_client *client, u32 vf_id);
/* called when the number of VFs changes */
void (*vf_enable)(struct i40e_info *ldev,
struct i40e_client *client, u32 num_vfs);
/* returns true if VF is capable of specified offload */
int (*vf_capable)(struct i40e_info *ldev,
struct i40e_client *client, u32 vf_id);
};
/* Client device */
struct i40e_client_instance {
struct list_head list;
struct i40e_info lan_info;
struct i40e_client *client;
unsigned long state;
/* A count of all the in-progress calls to the client */
atomic_t ref_cnt;
};
struct i40e_client {
struct list_head list; /* list of registered clients */
char name[I40E_CLIENT_STR_LENGTH];
struct i40e_client_version version;
unsigned long state; /* client state */
atomic_t ref_cnt; /* Count of all the client devices of this kind */
u32 flags;
#define I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE BIT(0)
#define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS BIT(2)
enum i40e_client_type type;
struct i40e_client_ops *ops; /* client ops provided by the client */
};
static inline bool i40e_client_is_registered(struct i40e_client *client)
{
return test_bit(__I40E_CLIENT_REGISTERED, &client->state);
}
/* used by clients */
int i40e_register_client(struct i40e_client *client);
int i40e_unregister_client(struct i40e_client *client);
#endif /* _I40E_CLIENT_H_ */
......@@ -290,7 +290,7 @@ struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
*
* If not already scheduled, this puts the task into the work queue
**/
static void i40e_service_event_schedule(struct i40e_pf *pf)
void i40e_service_event_schedule(struct i40e_pf *pf)
{
if (!test_bit(__I40E_DOWN, &pf->state) &&
!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
......@@ -2212,7 +2212,7 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
netdev->mtu = new_mtu;
if (netif_running(netdev))
i40e_vsi_reinit_locked(vsi);
i40e_notify_client_of_l2_param_changes(vsi);
return 0;
}
......@@ -4166,6 +4166,9 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
free_irq(pf->msix_entries[0].vector, pf);
}
i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
I40E_IWARP_IRQ_PILE_ID);
i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
for (i = 0; i < pf->num_alloc_vsi; i++)
if (pf->vsi[i])
......@@ -4209,12 +4212,17 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi)
**/
static void i40e_vsi_close(struct i40e_vsi *vsi)
{
bool reset = false;
if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
i40e_down(vsi);
i40e_vsi_free_irq(vsi);
i40e_vsi_free_tx_resources(vsi);
i40e_vsi_free_rx_resources(vsi);
vsi->current_netdev_flags = 0;
if (test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
reset = true;
i40e_notify_client_of_netdev_close(vsi, reset);
}
/**
......@@ -4831,6 +4839,12 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
ctxt.info = vsi->info;
i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
ctxt.info.valid_sections |=
cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
}
/* Update the VSI after updating the VSI queue-mapping information */
ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (ret) {
......@@ -4974,6 +4988,7 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
if (pf->vsi[v]->netdev)
i40e_dcbnl_set_all(pf->vsi[v]);
}
i40e_notify_client_of_l2_param_changes(pf->vsi[v]);
}
}
......@@ -5173,6 +5188,11 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
}
i40e_fdir_filter_restore(vsi);
}
/* On the next run of the service_task, notify any clients of the new
* opened netdev
*/
pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
i40e_service_event_schedule(pf);
return 0;
......@@ -5351,6 +5371,8 @@ int i40e_open(struct net_device *netdev)
geneve_get_rx_port(netdev);
#endif
i40e_notify_client_of_netdev_open(vsi);
return 0;
}
......@@ -6015,6 +6037,7 @@ static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
case I40E_VSI_SRIOV:
case I40E_VSI_VMDQ2:
case I40E_VSI_CTRL:
case I40E_VSI_IWARP:
case I40E_VSI_MIRROR:
default:
/* there is no notification for other VSIs */
......@@ -7116,6 +7139,7 @@ static void i40e_service_task(struct work_struct *work)
i40e_vc_process_vflr_event(pf);
i40e_watchdog_subtask(pf);
i40e_fdir_reinit_subtask(pf);
i40e_client_subtask(pf);
i40e_sync_filters_subtask(pf);
i40e_sync_udp_filters_subtask(pf);
i40e_clean_adminq_subtask(pf);
......@@ -7520,6 +7544,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
int vectors_left;
int v_budget, i;
int v_actual;
int iwarp_requested = 0;
if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
return -ENODEV;
......@@ -7533,6 +7558,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
* is governed by number of cpus in the system.
* - assumes symmetric Tx/Rx pairing
* - The number of VMDq pairs
* - The CPU count within the NUMA node if iWARP is enabled
#ifdef I40E_FCOE
* - The number of FCOE qps.
#endif
......@@ -7579,6 +7605,16 @@ static int i40e_init_msix(struct i40e_pf *pf)
}
#endif
/* can we reserve enough for iWARP? */
if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
if (!vectors_left)
pf->num_iwarp_msix = 0;
else if (vectors_left < pf->num_iwarp_msix)
pf->num_iwarp_msix = 1;
v_budget += pf->num_iwarp_msix;
vectors_left -= pf->num_iwarp_msix;
}
/* any vectors left over go for VMDq support */
if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps;
......@@ -7613,6 +7649,8 @@ static int i40e_init_msix(struct i40e_pf *pf)
* of these features based on the policy and at the end disable
* the features that did not get any vectors.
*/
iwarp_requested = pf->num_iwarp_msix;
pf->num_iwarp_msix = 0;
#ifdef I40E_FCOE
pf->num_fcoe_qps = 0;
pf->num_fcoe_msix = 0;
......@@ -7651,17 +7689,33 @@ static int i40e_init_msix(struct i40e_pf *pf)
pf->num_lan_msix = 1;
break;
case 3:
if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
pf->num_lan_msix = 1;
pf->num_iwarp_msix = 1;
} else {
pf->num_lan_msix = 2;
}
#ifdef I40E_FCOE
/* give one vector to FCoE */
if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
pf->num_lan_msix = 1;
pf->num_fcoe_msix = 1;
}
#else
pf->num_lan_msix = 2;
#endif
break;
default:
if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
pf->num_iwarp_msix = min_t(int, (vec / 3),
iwarp_requested);
pf->num_vmdq_vsis = min_t(int, (vec / 3),
I40E_DEFAULT_NUM_VMDQ_VSI);
} else {
pf->num_vmdq_vsis = min_t(int, (vec / 2),
I40E_DEFAULT_NUM_VMDQ_VSI);
}
pf->num_lan_msix = min_t(int,
(vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
pf->num_lan_msix);
#ifdef I40E_FCOE
/* give one vector to FCoE */
if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
......@@ -7669,8 +7723,6 @@ static int i40e_init_msix(struct i40e_pf *pf)
vec--;
}
#endif
/* give the rest to the PF */
pf->num_lan_msix = min_t(int, vec, pf->num_lan_qps);
break;
}
}
......@@ -7680,6 +7732,12 @@ static int i40e_init_msix(struct i40e_pf *pf)
dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
}
if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
(pf->num_iwarp_msix == 0)) {
dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
}
#ifdef I40E_FCOE
if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) {
......@@ -7771,6 +7829,7 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
vectors = i40e_init_msix(pf);
if (vectors < 0) {
pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
I40E_FLAG_IWARP_ENABLED |
#ifdef I40E_FCOE
I40E_FLAG_FCOE_ENABLED |
#endif
......@@ -8373,6 +8432,12 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
}
if (pf->hw.func_caps.iwarp) {
pf->flags |= I40E_FLAG_IWARP_ENABLED;
/* IWARP needs one extra vector for CQP just like MISC.*/
pf->num_iwarp_msix = (int)num_online_cpus() + 1;
}
#ifdef I40E_FCOE
i40e_init_pf_fcoe(pf);
......@@ -9216,6 +9281,13 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
}
if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
ctxt.info.valid_sections |=
cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
ctxt.info.queueing_opt_flags |=
I40E_AQ_VSI_QUE_OPT_TCP_ENA;
}
ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
if (pf->vf[vsi->vf_id].spoofchk) {
......@@ -9239,6 +9311,10 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
break;
#endif /* I40E_FCOE */
case I40E_VSI_IWARP:
/* send down message to iWARP */
break;
default:
return -ENODEV;
}
......@@ -10350,6 +10426,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
/* make sure all the fancies are disabled */
pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
I40E_FLAG_IWARP_ENABLED |
#ifdef I40E_FCOE
I40E_FLAG_FCOE_ENABLED |
#endif
......@@ -10367,6 +10444,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
queues_left -= pf->num_lan_qps;
pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
I40E_FLAG_IWARP_ENABLED |
#ifdef I40E_FCOE
I40E_FLAG_FCOE_ENABLED |
#endif
......@@ -10959,7 +11037,17 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
#endif /* CONFIG_PCI_IOV */
pfs_found++;
if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
pf->num_iwarp_msix,
I40E_IWARP_IRQ_PILE_ID);
if (pf->iwarp_base_vector < 0) {
dev_info(&pdev->dev,
"failed to get tracking for %d vectors for IWARP err=%d\n",
pf->num_iwarp_msix, pf->iwarp_base_vector);
pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
}
}
i40e_dbg_pf_init(pf);
......@@ -10970,6 +11058,12 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
mod_timer(&pf->service_timer,
round_jiffies(jiffies + pf->service_timer_period));
/* add this PF to client device list and launch a client service task */
err = i40e_lan_add_device(pf);
if (err)
dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
err);
#ifdef I40E_FCOE
/* create FCoE interface */
i40e_fcoe_vsi_setup(pf);
......@@ -11140,6 +11234,13 @@ static void i40e_remove(struct pci_dev *pdev)
if (pf->vsi[pf->lan_vsi])
i40e_vsi_release(pf->vsi[pf->lan_vsi]);
/* remove attached clients */
ret_code = i40e_lan_del_device(pf);
if (ret_code) {
dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
ret_code);
}
/* shutdown and destroy the HMC */
if (pf->hw.hmc.hmc_obj) {
ret_code = i40e_shutdown_lan_hmc(&pf->hw);
......
......@@ -78,7 +78,7 @@ enum i40e_debug_mask {
I40E_DEBUG_DCB = 0x00000400,
I40E_DEBUG_DIAG = 0x00000800,
I40E_DEBUG_FD = 0x00001000,
I40E_DEBUG_IWARP = 0x00F00000,
I40E_DEBUG_AQ_MESSAGE = 0x01000000,
I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000,
I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000,
......@@ -144,6 +144,7 @@ enum i40e_vsi_type {
I40E_VSI_MIRROR = 5,
I40E_VSI_SRIOV = 6,
I40E_VSI_FDIR = 7,
I40E_VSI_IWARP = 8,
I40E_VSI_TYPE_UNKNOWN
};
......
......@@ -81,6 +81,9 @@ enum i40e_virtchnl_ops {
I40E_VIRTCHNL_OP_GET_STATS = 15,
I40E_VIRTCHNL_OP_FCOE = 16,
I40E_VIRTCHNL_OP_EVENT = 17,
I40E_VIRTCHNL_OP_IWARP = 20,
I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21,
I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22,
};
/* Virtual channel message descriptor. This overlays the admin queue
......@@ -348,6 +351,37 @@ struct i40e_virtchnl_pf_event {
int severity;
};
/* I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP
* VF uses this message to request PF to map IWARP vectors to IWARP queues.
* The request for this originates from the VF IWARP driver through
* a client interface between VF LAN and VF IWARP driver.
* A vector could have an AEQ and CEQ attached to it although
* there is a single AEQ per VF IWARP instance in which case
* most vectors will have an INVALID_IDX for aeq and valid idx for ceq.
* There will never be a case where there will be multiple CEQs attached
* to a single vector.
* PF configures interrupt mapping and returns status.
*/
/* HW does not define a type value for AEQ; only for RX/TX and CEQ.
* In order for us to keep the interface simple, SW will define a
* unique type value for AEQ.
*/
#define I40E_QUEUE_TYPE_PE_AEQ 0x80
#define I40E_QUEUE_INVALID_IDX 0xFFFF
struct i40e_virtchnl_iwarp_qv_info {
u32 v_idx; /* msix_vector */
u16 ceq_idx;
u16 aeq_idx;
u8 itr_idx;
};
struct i40e_virtchnl_iwarp_qvlist_info {
u32 num_vectors;
struct i40e_virtchnl_iwarp_qv_info qv_info[1];
};
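/* Illustrative example (hypothetical values): a VF iWARP instance with
 * one AEQ and two CEQs spread over MSI-X vectors 1 and 2 could describe
 * its mapping as
 *	num_vectors = 2
 *	qv_info[0] = { .v_idx = 1, .ceq_idx = 0, .aeq_idx = 0, .itr_idx = 0 }
 *	qv_info[1] = { .v_idx = 2, .ceq_idx = 1,
 *		       .aeq_idx = I40E_QUEUE_INVALID_IDX, .itr_idx = 0 }
 * i.e. the single AEQ shares one vector with a CEQ and every other vector
 * carries only a CEQ (aeq_idx set to I40E_QUEUE_INVALID_IDX).
 */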
/* VF reset states - these are written into the RSTAT register:
* I40E_VFGEN_RSTAT1 on the PF
* I40E_VFGEN_RSTAT on the VF
......
......@@ -351,6 +351,136 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
i40e_flush(hw);
}
/**
* i40e_release_iwarp_qvlist
* @vf: pointer to the VF.
*
**/
static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
{
struct i40e_pf *pf = vf->pf;
struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info;
u32 msix_vf;
u32 i;
if (!vf->qvlist_info)
return;
msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
for (i = 0; i < qvlist_info->num_vectors; i++) {
struct i40e_virtchnl_iwarp_qv_info *qv_info;
u32 next_q_index, next_q_type;
struct i40e_hw *hw = &pf->hw;
u32 v_idx, reg_idx, reg;
qv_info = &qvlist_info->qv_info[i];
if (!qv_info)
continue;
v_idx = qv_info->v_idx;
if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
/* Figure out the queue after CEQ and make that the
* first queue.
*/
reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK)
>> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT;
next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK)
>> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT;
reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
reg = (next_q_index &
I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
(next_q_type <<
I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
}
}
kfree(vf->qvlist_info);
vf->qvlist_info = NULL;
}
/**
* i40e_config_iwarp_qvlist
* @vf: pointer to the VF info
* @qvlist_info: queue and vector list
*
* Return 0 on success or < 0 on error
**/
static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info)
{
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
struct i40e_virtchnl_iwarp_qv_info *qv_info;
u32 v_idx, i, reg_idx, reg;
u32 next_q_idx, next_q_type;
u32 msix_vf, size;
size = sizeof(struct i40e_virtchnl_iwarp_qvlist_info) +
(sizeof(struct i40e_virtchnl_iwarp_qv_info) *
(qvlist_info->num_vectors - 1));
vf->qvlist_info = kzalloc(size, GFP_KERNEL);
vf->qvlist_info->num_vectors = qvlist_info->num_vectors;
msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
for (i = 0; i < qvlist_info->num_vectors; i++) {
qv_info = &qvlist_info->qv_info[i];
if (!qv_info)
continue;
v_idx = qv_info->v_idx;
/* Validate vector id belongs to this vf */
if (!i40e_vc_isvalid_vector_id(vf, v_idx))
goto err;
vf->qvlist_info->qv_info[i] = *qv_info;
reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
/* We might be sharing the interrupt, so get the first queue
* index and type, push it down the list by adding the new
* queue on top. Also link it with the new queue in CEQCTL.
*/
reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >>
I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT);
next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >>
I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK |
(v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) |
(qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) |
(next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) |
(next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT));
wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg);
reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
reg = (qv_info->ceq_idx &
I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
(I40E_QUEUE_TYPE_PE_CEQ <<
I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
}
if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK |
(v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) |
(qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT));
wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg);
}
}
return 0;
err:
kfree(vf->qvlist_info);
vf->qvlist_info = NULL;
return -EINVAL;
}
/**
* i40e_config_vsi_tx_queue
* @vf: pointer to the VF info
......@@ -849,9 +979,11 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
/* reallocate VF resources to reset the VSI state */
i40e_free_vf_res(vf);
if (!i40e_alloc_vf_res(vf)) {
int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
i40e_enable_vf_mappings(vf);
set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
clear_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
i40e_notify_client_of_vf_reset(pf, abs_vf_id);
}
/* tell the VF the reset is done */
wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
......@@ -876,11 +1008,7 @@ void i40e_free_vfs(struct i40e_pf *pf)
while (test_and_set_bit(__I40E_VF_DISABLE, &pf->state))
usleep_range(1000, 2000);
for (i = 0; i < pf->num_alloc_vfs; i++)
if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
i40e_vsi_control_rings(pf->vsi[pf->vf[i].lan_vsi_idx],
false);
i40e_notify_client_of_vf_enable(pf, 0);
for (i = 0; i < pf->num_alloc_vfs; i++)
if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
i40e_vsi_control_rings(pf->vsi[pf->vf[i].lan_vsi_idx],
......@@ -952,6 +1080,7 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
goto err_iov;
}
}
i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);
/* allocate memory */
vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
if (!vfs) {
......@@ -1205,6 +1334,13 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi->info.pvid)
vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
if (i40e_vf_client_capable(pf, vf->vf_id, I40E_CLIENT_IWARP) &&
(vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_IWARP)) {
vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_IWARP;
set_bit(I40E_VF_STAT_IWARPENA, &vf->vf_states);
}
if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ)
vfres->vf_offload_flags |=
......@@ -1813,6 +1949,72 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN, aq_ret);
}
/**
* i40e_vc_iwarp_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
* @msglen: msg length
*
* called from the VF for the iwarp msgs
**/
static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
struct i40e_pf *pf = vf->pf;
int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
i40e_status aq_ret = 0;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
!test_bit(I40E_VF_STAT_IWARPENA, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id,
msg, msglen);
error_param:
/* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_IWARP,
aq_ret);
}
/**
* i40e_vc_iwarp_qvmap_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
* @msglen: msg length
* @config: config qvmap or release it
*
* called from the VF for the iwarp msgs
**/
static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, u16 msglen,
bool config)
{
struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info =
(struct i40e_virtchnl_iwarp_qvlist_info *)msg;
i40e_status aq_ret = 0;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
!test_bit(I40E_VF_STAT_IWARPENA, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
if (config) {
if (i40e_config_iwarp_qvlist(vf, qvlist_info))
aq_ret = I40E_ERR_PARAM;
} else {
i40e_release_iwarp_qvlist(vf);
}
error_param:
/* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf,
config ? I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP :
I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP,
aq_ret);
}
/**
* i40e_vc_validate_vf_msg
* @vf: pointer to the VF info
......@@ -1908,6 +2110,32 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
case I40E_VIRTCHNL_OP_GET_STATS:
valid_len = sizeof(struct i40e_virtchnl_queue_select);
break;
case I40E_VIRTCHNL_OP_IWARP:
/* These messages are opaque to us and will be validated in
* the RDMA client code. We just need to check for nonzero
* length. The firmware will enforce max length restrictions.
*/
if (msglen)
valid_len = msglen;
else
err_msg_format = true;
break;
case I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
valid_len = 0;
break;
case I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
valid_len = sizeof(struct i40e_virtchnl_iwarp_qvlist_info);
if (msglen >= valid_len) {
struct i40e_virtchnl_iwarp_qvlist_info *qv =
(struct i40e_virtchnl_iwarp_qvlist_info *)msg;
if (qv->num_vectors == 0) {
err_msg_format = true;
break;
}
valid_len += ((qv->num_vectors - 1) *
sizeof(struct i40e_virtchnl_iwarp_qv_info));
}
break;
/* These are always errors coming from the VF. */
case I40E_VIRTCHNL_OP_EVENT:
case I40E_VIRTCHNL_OP_UNKNOWN:
......@@ -1997,6 +2225,15 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
case I40E_VIRTCHNL_OP_GET_STATS:
ret = i40e_vc_get_stats_msg(vf, msg, msglen);
break;
case I40E_VIRTCHNL_OP_IWARP:
ret = i40e_vc_iwarp_msg(vf, msg, msglen);
break;
case I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, true);
break;
case I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, false);
break;
case I40E_VIRTCHNL_OP_UNKNOWN:
default:
dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
......
......@@ -58,6 +58,7 @@ enum i40e_queue_ctrl {
enum i40e_vf_states {
I40E_VF_STAT_INIT = 0,
I40E_VF_STAT_ACTIVE,
I40E_VF_STAT_IWARPENA,
I40E_VF_STAT_FCOEENA,
I40E_VF_STAT_DISABLED,
};
......@@ -66,6 +67,7 @@ enum i40e_vf_states {
enum i40e_vf_capabilities {
I40E_VIRTCHNL_VF_CAP_PRIVILEGE = 0,
I40E_VIRTCHNL_VF_CAP_L2,
I40E_VIRTCHNL_VF_CAP_IWARP,
};
/* VF information structure */
......@@ -106,6 +108,8 @@ struct i40e_vf {
bool link_forced;
bool link_up; /* only valid if VF link is forced */
bool spoofchk;
/* RDMA Client */
struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info;
};
void i40e_free_vfs(struct i40e_pf *pf);
......