Commit dbb0b6ca authored by David S. Miller

Merge branch '200GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

From: Tony Nguyen <anthony.l.nguyen@intel.com>
To: davem@davemloft.net, kuba@kernel.org, pabeni@redhat.com,
	edumazet@google.com, netdev@vger.kernel.org
Cc: Tony Nguyen <anthony.l.nguyen@intel.com>, alan.brady@intel.com
Tony Nguyen says:

====================
idpf: refactor virtchnl messages

Alan Brady says:

This series has two primary goals: enable support of multiple
simultaneous messages and make the channel more robust. The way it
works right now, the driver can only send and receive a single message
at a time, and if something goes really wrong, it can lead to data
corruption and strange bugs.

To start the series, we introduce an idpf_virtchnl.h file. This reduces
the burden on idpf.h, which is overloaded with struct and function
declarations.

The conversion works by conceptualizing a send and receive as a
"virtchnl transaction" (idpf_vc_xn) and introducing a "transaction
manager" (idpf_vc_xn_manager). The vcxn_mngr initializes a ring of
transactions, and the driver pops free transactions off a bitmap to
track in-flight messages. Instead of needing to handle a complicated
send/recv for every message, the driver now just fills out an xn_params
struct and hands it to idpf_vc_xn_exec, which takes care of all the
messy bits. Once a message is sent and receives a reply, we leverage
the completion API to signal that the received buffer is ready to be
used (assuming success, or an error code otherwise). A minimal caller
is sketched below.
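
To make that concrete, here is a minimal sketch of a caller. Only the
xn_params fields and the idpf_vc_xn_exec() signature come from this
series; the GET_STATS wrapper itself is illustrative, not part of the
patches:

	struct virtchnl2_vport_stats stats_msg = {};
	struct idpf_vc_xn_params xn_params = {};
	ssize_t reply_sz;

	/* (a real caller would also fill stats_msg.vport_id, etc.) */

	/* describe the transaction; no wait queues or state bits needed */
	xn_params.vc_op = VIRTCHNL2_OP_GET_STATS;
	xn_params.send_buf.iov_base = &stats_msg;
	xn_params.send_buf.iov_len = sizeof(stats_msg);
	xn_params.recv_buf = xn_params.send_buf;
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;

	/* returns the reply size on success, negative error otherwise */
	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
	if (reply_sz < 0)
		return reply_sz;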

At a low level, this is implemented through the "sw cookie" field of
the virtchnl message descriptor. We have 16 bits into which we can put
whatever we want, and the recipient is required to apply the same
cookie to the reply for that message. We use the first 8 bits as an
index into the array of transactions to enable fast lookups, and the
second 8 bits as a salt to make sure each cookie is unique for that
message. As transactions are received in arbitrary order, it's possible
to reuse a transaction index, and the salt guards against index
conflicts to make certain the lookup is correct. As a primitive
example, say index 1 is used with salt 1. The message times out without
receiving a reply, so index 1 is renewed to be ready for a new
transaction, we report the timeout, and we send the message again.
Since index 1 is now free, it is used again, this time with salt 2.
This time we do get a reply, however it could be that the reply is
_actually_ for the previous send with index 1 and salt 1. Without the
salt we would have no way of knowing for sure whether it's the correct
reply, but with it we will know for certain.
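
Concretely, the cookie is packed and unpacked with FIELD_PREP() and
FIELD_GET() against the two byte masks this series defines (these lines
are excerpted from the new code further down):

	#define IDPF_VC_XN_IDX_M	GENMASK(7, 0)
	#define IDPF_VC_XN_SALT_M	GENMASK(15, 8)

	/* send side: stamp index + salt into the descriptor's sw cookie */
	cookie = FIELD_PREP(IDPF_VC_XN_SALT_M, xn->salt) |
		 FIELD_PREP(IDPF_VC_XN_IDX_M, xn->idx);

	/* receive side: look up the transaction, then verify the salt */
	xn_idx = FIELD_GET(IDPF_VC_XN_IDX_M, msg_info);
	salt = FIELD_GET(IDPF_VC_XN_SALT_M, msg_info);
	if (xn->salt != salt)
		return -EINVAL;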

Through this conversion we also get several other benefits. We can now
more appropriately handle asynchronously sent messages by providing
space for a callback to be defined. This notably allows us to handle
MAC filter failures better; previously we could be left with stale,
failed filters in our list, which shouldn't have a major impact but is
obviously not correct. I also managed to remove significantly more
lines than I added, which is a win in my book.

Additionally, this converts some variables to auto-variables where
appropriate. This makes the alloc paths much cleaner and less prone to
memory leaks. We also fix a few virtchnl related bugs while we're here.
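
For illustration only (the message struct here is a stand-in), the
auto-variable pattern replaces this:

	/* before: heap allocation that must be freed on every exit path */
	struct virtchnl2_vport *vport_msg;

	vport_msg = kzalloc(sizeof(*vport_msg), GFP_KERNEL);
	if (!vport_msg)
		return -ENOMEM;
	/* ... use vport_msg, then kfree() on every path out ... */

with this:

	/* after: an automatic variable cannot leak */
	struct virtchnl2_vport vport_msg = {};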

====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 784ee615 6009e63c
@@ -37,8 +37,6 @@ struct idpf_vport_max_q;
 #define IDPF_MB_MAX_ERR			20
 #define IDPF_NUM_CHUNKS_PER_MSG(struct_sz, chunk_sz)	\
 	((IDPF_CTLQ_MAX_BUF_LEN - (struct_sz)) / (chunk_sz))
-#define IDPF_WAIT_FOR_EVENT_TIMEO_MIN	2000
-#define IDPF_WAIT_FOR_EVENT_TIMEO	60000
 #define IDPF_MAX_WAIT			500
@@ -66,14 +64,12 @@ struct idpf_mac_filter {
 /**
  * enum idpf_state - State machine to handle bring up
- * @__IDPF_STARTUP: Start the state machine
  * @__IDPF_VER_CHECK: Negotiate virtchnl version
  * @__IDPF_GET_CAPS: Negotiate capabilities
  * @__IDPF_INIT_SW: Init based on given capabilities
  * @__IDPF_STATE_LAST: Must be last, used to determine size
  */
 enum idpf_state {
-	__IDPF_STARTUP,
 	__IDPF_VER_CHECK,
 	__IDPF_GET_CAPS,
 	__IDPF_INIT_SW,
@@ -87,6 +83,7 @@ enum idpf_state {
  * @IDPF_HR_RESET_IN_PROG: Reset in progress
  * @IDPF_REMOVE_IN_PROG: Driver remove in progress
  * @IDPF_MB_INTR_MODE: Mailbox in interrupt mode
+ * @IDPF_VC_CORE_INIT: virtchnl core has been init
  * @IDPF_FLAGS_NBITS: Must be last
  */
 enum idpf_flags {
@@ -95,6 +92,7 @@ enum idpf_flags {
 	IDPF_HR_RESET_IN_PROG,
 	IDPF_REMOVE_IN_PROG,
 	IDPF_MB_INTR_MODE,
+	IDPF_VC_CORE_INIT,
 	IDPF_FLAGS_NBITS,
 };
@@ -209,71 +207,6 @@ struct idpf_dev_ops {
 	struct idpf_reg_ops reg_ops;
 };
 
-/* These macros allow us to generate an enum and a matching char * array of
- * stringified enums that are always in sync. Checkpatch issues a bogus warning
- * about this being a complex macro; but it's wrong, these are never used as a
- * statement and instead only used to define the enum and array.
- */
-#define IDPF_FOREACH_VPORT_VC_STATE(STATE)	\
-	STATE(IDPF_VC_CREATE_VPORT)		\
-	STATE(IDPF_VC_CREATE_VPORT_ERR)		\
-	STATE(IDPF_VC_ENA_VPORT)		\
-	STATE(IDPF_VC_ENA_VPORT_ERR)		\
-	STATE(IDPF_VC_DIS_VPORT)		\
-	STATE(IDPF_VC_DIS_VPORT_ERR)		\
-	STATE(IDPF_VC_DESTROY_VPORT)		\
-	STATE(IDPF_VC_DESTROY_VPORT_ERR)	\
-	STATE(IDPF_VC_CONFIG_TXQ)		\
-	STATE(IDPF_VC_CONFIG_TXQ_ERR)		\
-	STATE(IDPF_VC_CONFIG_RXQ)		\
-	STATE(IDPF_VC_CONFIG_RXQ_ERR)		\
-	STATE(IDPF_VC_ENA_QUEUES)		\
-	STATE(IDPF_VC_ENA_QUEUES_ERR)		\
-	STATE(IDPF_VC_DIS_QUEUES)		\
-	STATE(IDPF_VC_DIS_QUEUES_ERR)		\
-	STATE(IDPF_VC_MAP_IRQ)			\
-	STATE(IDPF_VC_MAP_IRQ_ERR)		\
-	STATE(IDPF_VC_UNMAP_IRQ)		\
-	STATE(IDPF_VC_UNMAP_IRQ_ERR)		\
-	STATE(IDPF_VC_ADD_QUEUES)		\
-	STATE(IDPF_VC_ADD_QUEUES_ERR)		\
-	STATE(IDPF_VC_DEL_QUEUES)		\
-	STATE(IDPF_VC_DEL_QUEUES_ERR)		\
-	STATE(IDPF_VC_ALLOC_VECTORS)		\
-	STATE(IDPF_VC_ALLOC_VECTORS_ERR)	\
-	STATE(IDPF_VC_DEALLOC_VECTORS)		\
-	STATE(IDPF_VC_DEALLOC_VECTORS_ERR)	\
-	STATE(IDPF_VC_SET_SRIOV_VFS)		\
-	STATE(IDPF_VC_SET_SRIOV_VFS_ERR)	\
-	STATE(IDPF_VC_GET_RSS_LUT)		\
-	STATE(IDPF_VC_GET_RSS_LUT_ERR)		\
-	STATE(IDPF_VC_SET_RSS_LUT)		\
-	STATE(IDPF_VC_SET_RSS_LUT_ERR)		\
-	STATE(IDPF_VC_GET_RSS_KEY)		\
-	STATE(IDPF_VC_GET_RSS_KEY_ERR)		\
-	STATE(IDPF_VC_SET_RSS_KEY)		\
-	STATE(IDPF_VC_SET_RSS_KEY_ERR)		\
-	STATE(IDPF_VC_GET_STATS)		\
-	STATE(IDPF_VC_GET_STATS_ERR)		\
-	STATE(IDPF_VC_ADD_MAC_ADDR)		\
-	STATE(IDPF_VC_ADD_MAC_ADDR_ERR)		\
-	STATE(IDPF_VC_DEL_MAC_ADDR)		\
-	STATE(IDPF_VC_DEL_MAC_ADDR_ERR)		\
-	STATE(IDPF_VC_GET_PTYPE_INFO)		\
-	STATE(IDPF_VC_GET_PTYPE_INFO_ERR)	\
-	STATE(IDPF_VC_LOOPBACK_STATE)		\
-	STATE(IDPF_VC_LOOPBACK_STATE_ERR)	\
-	STATE(IDPF_VC_NBITS)
-
-#define IDPF_GEN_ENUM(ENUM) ENUM,
-#define IDPF_GEN_STRING(STRING) #STRING,
-
-enum idpf_vport_vc_state {
-	IDPF_FOREACH_VPORT_VC_STATE(IDPF_GEN_ENUM)
-};
-
-extern const char * const idpf_vport_vc_state_str[];
-
 /**
  * enum idpf_vport_reset_cause - Vport soft reset causes
  * @IDPF_SR_Q_CHANGE: Soft reset queue change
@@ -358,11 +291,7 @@ struct idpf_port_stats {
  * @port_stats: per port csum, header split, and other offload stats
  * @link_up: True if link is up
  * @link_speed_mbps: Link speed in mbps
- * @vc_msg: Virtchnl message buffer
- * @vc_state: Virtchnl message state
- * @vchnl_wq: Wait queue for virtchnl messages
  * @sw_marker_wq: workqueue for marker packets
- * @vc_buf_lock: Lock to protect virtchnl buffer
  */
 struct idpf_vport {
 	u16 num_txq;
@@ -408,12 +337,7 @@ struct idpf_vport {
 	bool link_up;
 	u32 link_speed_mbps;
 
-	char vc_msg[IDPF_CTLQ_MAX_BUF_LEN];
-	DECLARE_BITMAP(vc_state, IDPF_VC_NBITS);
-	wait_queue_head_t vchnl_wq;
 	wait_queue_head_t sw_marker_wq;
-	struct mutex vc_buf_lock;
 };
 
 /**
@@ -476,15 +400,11 @@ struct idpf_vport_user_config_data {
  * enum idpf_vport_config_flags - Vport config flags
  * @IDPF_VPORT_REG_NETDEV: Register netdev
  * @IDPF_VPORT_UP_REQUESTED: Set if interface up is requested on core reset
- * @IDPF_VPORT_ADD_MAC_REQ: Asynchronous add ether address in flight
- * @IDPF_VPORT_DEL_MAC_REQ: Asynchronous delete ether address in flight
  * @IDPF_VPORT_CONFIG_FLAGS_NBITS: Must be last
  */
 enum idpf_vport_config_flags {
 	IDPF_VPORT_REG_NETDEV,
 	IDPF_VPORT_UP_REQUESTED,
-	IDPF_VPORT_ADD_MAC_REQ,
-	IDPF_VPORT_DEL_MAC_REQ,
 	IDPF_VPORT_CONFIG_FLAGS_NBITS,
 };
@@ -555,11 +475,13 @@ struct idpf_vector_lifo {
 struct idpf_vport_config {
 	struct idpf_vport_user_config_data user_config;
 	struct idpf_vport_max_q max_q;
-	void *req_qs_chunks;
+	struct virtchnl2_add_queues *req_qs_chunks;
 	spinlock_t mac_filter_list_lock;
 	DECLARE_BITMAP(flags, IDPF_VPORT_CONFIG_FLAGS_NBITS);
 };
 
+struct idpf_vc_xn_manager;
+
 /**
  * struct idpf_adapter - Device data struct generated on probe
  * @pdev: PCI device struct given on probe
@@ -601,9 +523,7 @@ struct idpf_vport_config {
  * @stats_task: Periodic statistics retrieval task
  * @stats_wq: Workqueue for statistics task
  * @caps: Negotiated capabilities with device
- * @vchnl_wq: Wait queue for virtchnl messages
- * @vc_state: Virtchnl message state
- * @vc_msg: Virtchnl message buffer
+ * @vcxn_mngr: Virtchnl transaction manager
  * @dev_ops: See idpf_dev_ops
  * @num_vfs: Number of allocated VFs through sysfs. PF does not directly talk
  *	     to VFs but is used to initialize them
@@ -659,10 +579,8 @@ struct idpf_adapter {
 	struct delayed_work stats_task;
 	struct workqueue_struct *stats_wq;
 	struct virtchnl2_get_capabilities caps;
+	struct idpf_vc_xn_manager *vcxn_mngr;
 
-	wait_queue_head_t vchnl_wq;
-	DECLARE_BITMAP(vc_state, IDPF_VC_NBITS);
-	char vc_msg[IDPF_CTLQ_MAX_BUF_LEN];
 	struct idpf_dev_ops dev_ops;
 	int num_vfs;
 	bool crc_enable;
@@ -903,68 +821,18 @@ void idpf_mbx_task(struct work_struct *work);
 void idpf_vc_event_task(struct work_struct *work);
 void idpf_dev_ops_init(struct idpf_adapter *adapter);
 void idpf_vf_dev_ops_init(struct idpf_adapter *adapter);
-int idpf_vport_adjust_qs(struct idpf_vport *vport);
-int idpf_init_dflt_mbx(struct idpf_adapter *adapter);
-void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter);
-int idpf_vc_core_init(struct idpf_adapter *adapter);
-void idpf_vc_core_deinit(struct idpf_adapter *adapter);
 int idpf_intr_req(struct idpf_adapter *adapter);
 void idpf_intr_rel(struct idpf_adapter *adapter);
-int idpf_get_reg_intr_vecs(struct idpf_vport *vport,
-			   struct idpf_vec_regs *reg_vals);
 u16 idpf_get_max_tx_hdr_size(struct idpf_adapter *adapter);
-int idpf_send_delete_queues_msg(struct idpf_vport *vport);
-int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
-			     u16 num_complq, u16 num_rx_q, u16 num_rx_bufq);
 int idpf_initiate_soft_reset(struct idpf_vport *vport,
 			     enum idpf_vport_reset_cause reset_cause);
-int idpf_send_enable_vport_msg(struct idpf_vport *vport);
-int idpf_send_disable_vport_msg(struct idpf_vport *vport);
-int idpf_send_destroy_vport_msg(struct idpf_vport *vport);
-int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport);
-int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport);
-int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get);
-int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get);
-int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter);
-int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors);
 void idpf_deinit_task(struct idpf_adapter *adapter);
 int idpf_req_rel_vector_indexes(struct idpf_adapter *adapter,
 				u16 *q_vector_idxs,
 				struct idpf_vector_info *vec_info);
-int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport);
-int idpf_send_get_stats_msg(struct idpf_vport *vport);
-int idpf_get_vec_ids(struct idpf_adapter *adapter,
-		     u16 *vecids, int num_vecids,
-		     struct virtchnl2_vector_chunks *chunks);
-int idpf_recv_mb_msg(struct idpf_adapter *adapter, u32 op,
-		     void *msg, int msg_size);
-int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
-		     u16 msg_size, u8 *msg);
 void idpf_set_ethtool_ops(struct net_device *netdev);
-int idpf_vport_alloc_max_qs(struct idpf_adapter *adapter,
-			    struct idpf_vport_max_q *max_q);
-void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter,
-			       struct idpf_vport_max_q *max_q);
-int idpf_add_del_mac_filters(struct idpf_vport *vport,
-			     struct idpf_netdev_priv *np,
-			     bool add, bool async);
-int idpf_set_promiscuous(struct idpf_adapter *adapter,
-			 struct idpf_vport_user_config_data *config_data,
-			 u32 vport_id);
-int idpf_send_disable_queues_msg(struct idpf_vport *vport);
-void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q);
-u32 idpf_get_vport_id(struct idpf_vport *vport);
-int idpf_vport_queue_ids_init(struct idpf_vport *vport);
-int idpf_queue_reg_init(struct idpf_vport *vport);
-int idpf_send_config_queues_msg(struct idpf_vport *vport);
-int idpf_send_enable_queues_msg(struct idpf_vport *vport);
-int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
-			       struct idpf_vport_max_q *max_q);
-int idpf_check_supported_desc_ids(struct idpf_vport *vport);
 void idpf_vport_intr_write_itr(struct idpf_q_vector *q_vector,
 			       u16 itr, bool tx);
-int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map);
-int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs);
 int idpf_sriov_configure(struct pci_dev *pdev, int num_vfs);
 u8 idpf_vport_get_hsplit(const struct idpf_vport *vport);
@@ -516,6 +516,8 @@ int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
 		/* Wrap to end of end ring since current ntp is 0 */
 		cq->next_to_post = cq->ring_size - 1;
 
+	dma_wmb();
+
 	wr32(hw, cq->reg.tail, cq->next_to_post);
 }
 
@@ -546,11 +548,6 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
 	int err = 0;
 	u16 i;
 
-	if (*num_q_msg == 0)
-		return 0;
-	else if (*num_q_msg > cq->ring_size)
-		return -EBADR;
-
 	/* take the lock before we start messing with the ring */
 	mutex_lock(&cq->cq_lock);
@@ -69,6 +69,11 @@ struct idpf_ctlq_msg {
 			u8 context[IDPF_INDIRECT_CTX_SIZE];
 			struct idpf_dma_mem *payload;
 		} indirect;
+		struct {
+			u32 rsvd;
+			u16 data;
+			u16 flags;
+		} sw_cookie;
 	} ctx;
 };
@@ -3,6 +3,7 @@
 
 #include "idpf.h"
 #include "idpf_lan_pf_regs.h"
+#include "idpf_virtchnl.h"
 
 #define IDPF_PF_ITR_IDX_SPACING	0x4
@@ -2,14 +2,11 @@
 /* Copyright (C) 2023 Intel Corporation */
 
 #include "idpf.h"
+#include "idpf_virtchnl.h"
 
 static const struct net_device_ops idpf_netdev_ops_splitq;
 static const struct net_device_ops idpf_netdev_ops_singleq;
 
-const char * const idpf_vport_vc_state_str[] = {
-	IDPF_FOREACH_VPORT_VC_STATE(IDPF_GEN_STRING)
-};
-
 /**
  * idpf_init_vector_stack - Fill the MSIX vector stack with vector index
  * @adapter: private data struct
@@ -82,19 +79,12 @@ static void idpf_mb_intr_rel_irq(struct idpf_adapter *adapter)
  */
 void idpf_intr_rel(struct idpf_adapter *adapter)
 {
-	int err;
-
 	if (!adapter->msix_entries)
 		return;
 
 	idpf_mb_intr_rel_irq(adapter);
 	pci_free_irq_vectors(adapter->pdev);
-
-	err = idpf_send_dealloc_vectors_msg(adapter);
-	if (err)
-		dev_err(&adapter->pdev->dev,
-			"Failed to deallocate vectors: %d\n", err);
-
+	idpf_send_dealloc_vectors_msg(adapter);
 	idpf_deinit_vector_stack(adapter);
 	kfree(adapter->msix_entries);
 	adapter->msix_entries = NULL;
@@ -975,7 +965,6 @@ static void idpf_vport_rel(struct idpf_vport *vport)
 	struct idpf_rss_data *rss_data;
 	struct idpf_vport_max_q max_q;
 	u16 idx = vport->idx;
-	int i;
 
 	vport_config = adapter->vport_config[vport->idx];
 	idpf_deinit_rss(vport);
@@ -985,20 +974,6 @@ static void idpf_vport_rel(struct idpf_vport *vport)
 
 	idpf_send_destroy_vport_msg(vport);
 
-	/* Set all bits as we dont know on which vc_state the vport vhnl_wq
-	 * is waiting on and wakeup the virtchnl workqueue even if it is
-	 * waiting for the response as we are going down
-	 */
-	for (i = 0; i < IDPF_VC_NBITS; i++)
-		set_bit(i, vport->vc_state);
-	wake_up(&vport->vchnl_wq);
-
-	mutex_destroy(&vport->vc_buf_lock);
-
-	/* Clear all the bits */
-	for (i = 0; i < IDPF_VC_NBITS; i++)
-		clear_bit(i, vport->vc_state);
-
 	/* Release all max queues allocated to the adapter's pool */
 	max_q.max_rxq = vport_config->max_q.max_rxq;
 	max_q.max_txq = vport_config->max_q.max_txq;
@@ -1253,7 +1228,7 @@ void idpf_mbx_task(struct work_struct *work)
 		queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task,
 				   msecs_to_jiffies(300));
 
-	idpf_recv_mb_msg(adapter, VIRTCHNL2_OP_UNKNOWN, NULL, 0);
+	idpf_recv_mb_msg(adapter);
 }
@@ -1543,9 +1518,7 @@ void idpf_init_task(struct work_struct *work)
 	vport_config = adapter->vport_config[index];
 
 	init_waitqueue_head(&vport->sw_marker_wq);
-	init_waitqueue_head(&vport->vchnl_wq);
 
-	mutex_init(&vport->vc_buf_lock);
 	spin_lock_init(&vport_config->mac_filter_list_lock);
 
 	INIT_LIST_HEAD(&vport_config->user_config.mac_filter_list);
@@ -1823,6 +1796,8 @@ static int idpf_init_hard_reset(struct idpf_adapter *adapter)
 		goto unlock_mutex;
 	}
 
+	queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0);
+
 	/* Initialize the state machine, also allocate memory and request
 	 * resources
 	 */
@@ -1902,7 +1877,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
 	 * mess with. Nothing below should use those variables from new_vport
 	 * and should instead always refer to them in vport if they need to.
 	 */
-	memcpy(new_vport, vport, offsetof(struct idpf_vport, vc_state));
+	memcpy(new_vport, vport, offsetof(struct idpf_vport, link_speed_mbps));
 
 	/* Adjust resource parameters prior to reallocating resources */
 	switch (reset_cause) {
@@ -1951,7 +1926,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
 	/* Same comment as above regarding avoiding copying the wait_queues and
 	 * mutexes applies here. We do not want to mess with those if possible.
 	 */
-	memcpy(vport, new_vport, offsetof(struct idpf_vport, vc_state));
+	memcpy(vport, new_vport, offsetof(struct idpf_vport, link_speed_mbps));
 
 	/* Since idpf_vport_queues_alloc was called with new_port, the queue
 	 * back pointers are currently pointing to the local new_vport. Reset
@@ -3,6 +3,7 @@
 
 #include "idpf.h"
 #include "idpf_devids.h"
+#include "idpf_virtchnl.h"
 
 #define DRV_SUMMARY	"Intel(R) Infrastructure Data Path Function Linux Driver"
@@ -30,6 +31,7 @@ static void idpf_remove(struct pci_dev *pdev)
 	idpf_sriov_configure(pdev, 0);
 	idpf_vc_core_deinit(adapter);
+
 	/* Be a good citizen and leave the device clean on exit */
 	adapter->dev_ops.reg_ops.trigger_reset(adapter, IDPF_HR_FUNC_RESET);
 	idpf_deinit_dflt_mbx(adapter);
@@ -66,6 +68,8 @@ static void idpf_remove(struct pci_dev *pdev)
 	adapter->vport_config = NULL;
 	kfree(adapter->netdevs);
 	adapter->netdevs = NULL;
+	kfree(adapter->vcxn_mngr);
+	adapter->vcxn_mngr = NULL;
 
 	mutex_destroy(&adapter->vport_ctrl_lock);
 	mutex_destroy(&adapter->vector_lock);
@@ -229,8 +233,6 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	mutex_init(&adapter->queue_lock);
 	mutex_init(&adapter->vc_buf_lock);
 
-	init_waitqueue_head(&adapter->vchnl_wq);
-
 	INIT_DELAYED_WORK(&adapter->init_task, idpf_init_task);
 	INIT_DELAYED_WORK(&adapter->serv_task, idpf_service_task);
 	INIT_DELAYED_WORK(&adapter->mbx_task, idpf_mbx_task);
@@ -2,6 +2,7 @@
 /* Copyright (C) 2023 Intel Corporation */
 
 #include "idpf.h"
+#include "idpf_virtchnl.h"
 
 /**
  * idpf_buf_lifo_push - push a buffer pointer onto stack
@@ -3,6 +3,7 @@
 
 #include "idpf.h"
 #include "idpf_lan_vf_regs.h"
+#include "idpf_virtchnl.h"
 
 #define IDPF_VF_ITR_IDX_SPACING	0x40
 
@@ -137,7 +138,7 @@ static void idpf_vf_trigger_reset(struct idpf_adapter *adapter,
 	/* Do not send VIRTCHNL2_OP_RESET_VF message on driver unload */
 	if (trig_cause == IDPF_HR_FUNC_RESET &&
 	    !test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
-		idpf_send_mb_msg(adapter, VIRTCHNL2_OP_RESET_VF, 0, NULL);
+		idpf_send_mb_msg(adapter, VIRTCHNL2_OP_RESET_VF, 0, NULL, 0);
 }
@@ -2,46 +2,192 @@
 /* Copyright (C) 2023 Intel Corporation */
 
 #include "idpf.h"
+#include "idpf_virtchnl.h"
+
+#define IDPF_VC_XN_MIN_TIMEOUT_MSEC	2000
+#define IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC	(60 * 1000)
+#define IDPF_VC_XN_IDX_M		GENMASK(7, 0)
+#define IDPF_VC_XN_SALT_M		GENMASK(15, 8)
+#define IDPF_VC_XN_RING_LEN		U8_MAX
+
+/**
+ * enum idpf_vc_xn_state - Virtchnl transaction status
+ * @IDPF_VC_XN_IDLE: not expecting a reply, ready to be used
+ * @IDPF_VC_XN_WAITING: expecting a reply, not yet received
+ * @IDPF_VC_XN_COMPLETED_SUCCESS: a reply was expected and received,
+ *				  buffer updated
+ * @IDPF_VC_XN_COMPLETED_FAILED: a reply was expected and received, but there
+ *				 was an error, buffer not updated
+ * @IDPF_VC_XN_SHUTDOWN: transaction object cannot be used, VC torn down
+ * @IDPF_VC_XN_ASYNC: transaction sent asynchronously and doesn't have the
+ *		      return context; a callback may be provided to handle
+ *		      return
+ */
+enum idpf_vc_xn_state {
+	IDPF_VC_XN_IDLE = 1,
+	IDPF_VC_XN_WAITING,
+	IDPF_VC_XN_COMPLETED_SUCCESS,
+	IDPF_VC_XN_COMPLETED_FAILED,
+	IDPF_VC_XN_SHUTDOWN,
+	IDPF_VC_XN_ASYNC,
+};
+
+struct idpf_vc_xn;
+
+/* Callback for asynchronous messages */
+typedef int (*async_vc_cb) (struct idpf_adapter *, struct idpf_vc_xn *,
+			    const struct idpf_ctlq_msg *);
+
+/**
+ * struct idpf_vc_xn - Data structure representing virtchnl transactions
+ * @completed: virtchnl event loop uses that to signal when a reply is
+ *	       available, uses kernel completion API
+ * @state: virtchnl event loop stores the data below, protected by the
+ *	   completion's lock.
+ * @reply_sz: Original size of reply, may be > reply_buf.iov_len; it will be
+ *	      truncated on its way to the receiver thread according to
+ *	      reply_buf.iov_len.
+ * @reply: Reference to the buffer(s) where the reply data should be written
+ *	   to. May be 0-length (then NULL address permitted) if the reply data
+ *	   should be ignored.
+ * @async_handler: if sent asynchronously, a callback can be provided to handle
+ *		   the reply when it's received
+ * @vc_op: corresponding opcode sent with this transaction
+ * @idx: index used as retrieval on reply receive, used for cookie
+ * @salt: changed every message to make unique, used for cookie
+ */
+struct idpf_vc_xn {
+	struct completion completed;
+	enum idpf_vc_xn_state state;
+	size_t reply_sz;
+	struct kvec reply;
+	async_vc_cb async_handler;
+	u32 vc_op;
+	u8 idx;
+	u8 salt;
+};
+
+/**
+ * struct idpf_vc_xn_params - Parameters for executing transaction
+ * @send_buf: kvec for send buffer
+ * @recv_buf: kvec for recv buffer, may be NULL, must then have zero length
+ * @timeout_ms: timeout to wait for reply
+ * @async: send message asynchronously, will not wait on completion
+ * @async_handler: If sent asynchronously, optional callback handler. The user
+ *		   must be careful when using async handlers as the memory for
+ *		   the recv_buf _cannot_ be on stack if this is async.
+ * @vc_op: virtchnl op to send
+ */
+struct idpf_vc_xn_params {
+	struct kvec send_buf;
+	struct kvec recv_buf;
+	int timeout_ms;
+	bool async;
+	async_vc_cb async_handler;
+	u32 vc_op;
+};
+
+/**
+ * struct idpf_vc_xn_manager - Manager for tracking transactions
+ * @ring: backing and lookup for transactions
+ * @free_xn_bm: bitmap for free transactions
+ * @xn_bm_lock: make bitmap access synchronous where necessary
+ * @salt: used to make cookie unique every message
+ */
+struct idpf_vc_xn_manager {
+	struct idpf_vc_xn ring[IDPF_VC_XN_RING_LEN];
+	DECLARE_BITMAP(free_xn_bm, IDPF_VC_XN_RING_LEN);
+	spinlock_t xn_bm_lock;
+	u8 salt;
+};
+
+/**
+ * idpf_vid_to_vport - Translate vport id to vport pointer
+ * @adapter: private data struct
+ * @v_id: vport id to translate
+ *
+ * Returns vport matching v_id, NULL if not found.
+ */
+static
+struct idpf_vport *idpf_vid_to_vport(struct idpf_adapter *adapter, u32 v_id)
+{
+	u16 num_max_vports = idpf_get_max_vports(adapter);
+	int i;
+
+	for (i = 0; i < num_max_vports; i++)
+		if (adapter->vport_ids[i] == v_id)
+			return adapter->vports[i];
+
+	return NULL;
+}
+
+/**
+ * idpf_handle_event_link - Handle link event message
+ * @adapter: private data struct
+ * @v2e: virtchnl event message
+ */
+static void idpf_handle_event_link(struct idpf_adapter *adapter,
+				   const struct virtchnl2_event *v2e)
+{
+	struct idpf_netdev_priv *np;
+	struct idpf_vport *vport;
+
+	vport = idpf_vid_to_vport(adapter, le32_to_cpu(v2e->vport_id));
+	if (!vport) {
+		dev_err_ratelimited(&adapter->pdev->dev, "Failed to find vport_id %d for link event\n",
+				    v2e->vport_id);
+		return;
+	}
+	np = netdev_priv(vport->netdev);
+
+	vport->link_speed_mbps = le32_to_cpu(v2e->link_speed);
+
+	if (vport->link_up == v2e->link_status)
+		return;
+
+	vport->link_up = v2e->link_status;
+
+	if (np->state != __IDPF_VPORT_UP)
+		return;
+
+	if (vport->link_up) {
+		netif_tx_start_all_queues(vport->netdev);
+		netif_carrier_on(vport->netdev);
+	} else {
+		netif_tx_stop_all_queues(vport->netdev);
+		netif_carrier_off(vport->netdev);
+	}
+}
+
 /**
  * idpf_recv_event_msg - Receive virtchnl event message
- * @vport: virtual port structure
+ * @adapter: Driver specific private structure
  * @ctlq_msg: message to copy from
  *
  * Receive virtchnl event message
  */
-static void idpf_recv_event_msg(struct idpf_vport *vport,
+static void idpf_recv_event_msg(struct idpf_adapter *adapter,
 				struct idpf_ctlq_msg *ctlq_msg)
 {
-	struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
+	int payload_size = ctlq_msg->ctx.indirect.payload->size;
 	struct virtchnl2_event *v2e;
-	bool link_status;
 	u32 event;
 
+	if (payload_size < sizeof(*v2e)) {
+		dev_err_ratelimited(&adapter->pdev->dev, "Failed to receive valid payload for event msg (op %d len %d)\n",
+				    ctlq_msg->cookie.mbx.chnl_opcode,
+				    payload_size);
+		return;
+	}
+
 	v2e = (struct virtchnl2_event *)ctlq_msg->ctx.indirect.payload->va;
 	event = le32_to_cpu(v2e->event);
 
 	switch (event) {
 	case VIRTCHNL2_EVENT_LINK_CHANGE:
-		vport->link_speed_mbps = le32_to_cpu(v2e->link_speed);
-		link_status = v2e->link_status;
-
-		if (vport->link_up == link_status)
-			break;
-
-		vport->link_up = link_status;
-		if (np->state == __IDPF_VPORT_UP) {
-			if (vport->link_up) {
-				netif_carrier_on(vport->netdev);
-				netif_tx_start_all_queues(vport->netdev);
-			} else {
-				netif_tx_stop_all_queues(vport->netdev);
-				netif_carrier_off(vport->netdev);
-			}
-		}
-		break;
+		idpf_handle_event_link(adapter, v2e);
+		return;
 	default:
-		dev_err(&vport->adapter->pdev->dev,
+		dev_err(&adapter->pdev->dev,
 			"Unknown event %d from PF\n", event);
 		break;
 	}
@@ -93,13 +239,14 @@ static int idpf_mb_clean(struct idpf_adapter *adapter)
  * @op: virtchnl opcode
 * @msg_size: size of the payload
 * @msg: pointer to buffer holding the payload
+ * @cookie: unique SW generated cookie per message
 *
 * Will prepare the control queue message and initiates the send api
 *
 * Returns 0 on success, negative on failure
 */
 int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
-		     u16 msg_size, u8 *msg)
+		     u16 msg_size, u8 *msg, u16 cookie)
 {
 	struct idpf_ctlq_msg *ctlq_msg;
 	struct idpf_dma_mem *dma_mem;
@@ -139,8 +286,12 @@ int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
 		err = -ENOMEM;
 		goto dma_alloc_error;
 	}
-	memcpy(dma_mem->va, msg, msg_size);
+
+	/* It's possible we're just sending an opcode but no buffer */
+	if (msg && msg_size)
+		memcpy(dma_mem->va, msg, msg_size);
 	ctlq_msg->ctx.indirect.payload = dma_mem;
+	ctlq_msg->ctx.sw_cookie.data = cookie;
 
 	err = idpf_ctlq_send(&adapter->hw, adapter->hw.asq, 1, ctlq_msg);
 	if (err)
@@ -159,592 +310,432 @@ int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
 	return err;
 }
/** /* API for virtchnl "transaction" support ("xn" for short).
* idpf_find_vport - Find vport pointer from control queue message
* @adapter: driver specific private structure
* @vport: address of vport pointer to copy the vport from adapters vport list
* @ctlq_msg: control queue message
* *
* Return 0 on success, error value on failure. Also this function does check * We are reusing the completion lock to serialize the accesses to the
* for the opcodes which expect to receive payload and return error value if * transaction state for simplicity, but it could be its own separate synchro
* it is not the case. * as well. For now, this API is only used from within a workqueue context;
* raw_spin_lock() is enough.
*/ */
static int idpf_find_vport(struct idpf_adapter *adapter, /**
struct idpf_vport **vport, * idpf_vc_xn_lock - Request exclusive access to vc transaction
struct idpf_ctlq_msg *ctlq_msg) * @xn: struct idpf_vc_xn* to access
{ */
bool no_op = false, vid_found = false; #define idpf_vc_xn_lock(xn) \
int i, err = 0; raw_spin_lock(&(xn)->completed.wait.lock)
char *vc_msg;
u32 v_id;
vc_msg = kcalloc(IDPF_CTLQ_MAX_BUF_LEN, sizeof(char), GFP_KERNEL); /**
if (!vc_msg) * idpf_vc_xn_unlock - Release exclusive access to vc transaction
return -ENOMEM; * @xn: struct idpf_vc_xn* to access
*/
#define idpf_vc_xn_unlock(xn) \
raw_spin_unlock(&(xn)->completed.wait.lock)
if (ctlq_msg->data_len) { /**
size_t payload_size = ctlq_msg->ctx.indirect.payload->size; * idpf_vc_xn_release_bufs - Release reference to reply buffer(s) and
* reset the transaction state.
* @xn: struct idpf_vc_xn to update
*/
static void idpf_vc_xn_release_bufs(struct idpf_vc_xn *xn)
{
xn->reply.iov_base = NULL;
xn->reply.iov_len = 0;
if (!payload_size) { if (xn->state != IDPF_VC_XN_SHUTDOWN)
dev_err(&adapter->pdev->dev, "Failed to receive payload buffer\n"); xn->state = IDPF_VC_XN_IDLE;
kfree(vc_msg); }
return -EINVAL; /**
} * idpf_vc_xn_init - Initialize virtchnl transaction object
* @vcxn_mngr: pointer to vc transaction manager struct
*/
static void idpf_vc_xn_init(struct idpf_vc_xn_manager *vcxn_mngr)
{
int i;
memcpy(vc_msg, ctlq_msg->ctx.indirect.payload->va, spin_lock_init(&vcxn_mngr->xn_bm_lock);
min_t(size_t, payload_size, IDPF_CTLQ_MAX_BUF_LEN));
}
switch (ctlq_msg->cookie.mbx.chnl_opcode) {
case VIRTCHNL2_OP_VERSION:
case VIRTCHNL2_OP_GET_CAPS:
case VIRTCHNL2_OP_CREATE_VPORT:
case VIRTCHNL2_OP_SET_SRIOV_VFS:
case VIRTCHNL2_OP_ALLOC_VECTORS:
case VIRTCHNL2_OP_DEALLOC_VECTORS:
case VIRTCHNL2_OP_GET_PTYPE_INFO:
goto free_vc_msg;
case VIRTCHNL2_OP_ENABLE_VPORT:
case VIRTCHNL2_OP_DISABLE_VPORT:
case VIRTCHNL2_OP_DESTROY_VPORT:
v_id = le32_to_cpu(((struct virtchnl2_vport *)vc_msg)->vport_id);
break;
case VIRTCHNL2_OP_CONFIG_TX_QUEUES:
v_id = le32_to_cpu(((struct virtchnl2_config_tx_queues *)vc_msg)->vport_id);
break;
case VIRTCHNL2_OP_CONFIG_RX_QUEUES:
v_id = le32_to_cpu(((struct virtchnl2_config_rx_queues *)vc_msg)->vport_id);
break;
case VIRTCHNL2_OP_ENABLE_QUEUES:
case VIRTCHNL2_OP_DISABLE_QUEUES:
case VIRTCHNL2_OP_DEL_QUEUES:
v_id = le32_to_cpu(((struct virtchnl2_del_ena_dis_queues *)vc_msg)->vport_id);
break;
case VIRTCHNL2_OP_ADD_QUEUES:
v_id = le32_to_cpu(((struct virtchnl2_add_queues *)vc_msg)->vport_id);
break;
case VIRTCHNL2_OP_MAP_QUEUE_VECTOR:
case VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR:
v_id = le32_to_cpu(((struct virtchnl2_queue_vector_maps *)vc_msg)->vport_id);
break;
case VIRTCHNL2_OP_GET_STATS:
v_id = le32_to_cpu(((struct virtchnl2_vport_stats *)vc_msg)->vport_id);
break;
case VIRTCHNL2_OP_GET_RSS_LUT:
case VIRTCHNL2_OP_SET_RSS_LUT:
v_id = le32_to_cpu(((struct virtchnl2_rss_lut *)vc_msg)->vport_id);
break;
case VIRTCHNL2_OP_GET_RSS_KEY:
case VIRTCHNL2_OP_SET_RSS_KEY:
v_id = le32_to_cpu(((struct virtchnl2_rss_key *)vc_msg)->vport_id);
break;
case VIRTCHNL2_OP_EVENT:
v_id = le32_to_cpu(((struct virtchnl2_event *)vc_msg)->vport_id);
break;
case VIRTCHNL2_OP_LOOPBACK:
v_id = le32_to_cpu(((struct virtchnl2_loopback *)vc_msg)->vport_id);
break;
case VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE:
v_id = le32_to_cpu(((struct virtchnl2_promisc_info *)vc_msg)->vport_id);
break;
case VIRTCHNL2_OP_ADD_MAC_ADDR:
case VIRTCHNL2_OP_DEL_MAC_ADDR:
v_id = le32_to_cpu(((struct virtchnl2_mac_addr_list *)vc_msg)->vport_id);
break;
default:
no_op = true;
break;
}
if (no_op) for (i = 0; i < ARRAY_SIZE(vcxn_mngr->ring); i++) {
goto free_vc_msg; struct idpf_vc_xn *xn = &vcxn_mngr->ring[i];
for (i = 0; i < idpf_get_max_vports(adapter); i++) { xn->state = IDPF_VC_XN_IDLE;
if (adapter->vport_ids[i] == v_id) { xn->idx = i;
vid_found = true; idpf_vc_xn_release_bufs(xn);
break; init_completion(&xn->completed);
}
} }
if (vid_found) bitmap_fill(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
*vport = adapter->vports[i];
else
err = -EINVAL;
free_vc_msg:
kfree(vc_msg);
return err;
} }
/** /**
* idpf_copy_data_to_vc_buf - Copy the virtchnl response data into the buffer. * idpf_vc_xn_shutdown - Uninitialize virtchnl transaction object
* @adapter: driver specific private structure * @vcxn_mngr: pointer to vc transaction manager struct
* @vport: virtual port structure
* @ctlq_msg: msg to copy from
* @err_enum: err bit to set on error
* *
* Copies the payload from ctlq_msg into virtchnl buffer. Returns 0 on success, * All waiting threads will be woken-up and their transaction aborted. Further
* negative on failure. * operations on that object will fail.
*/ */
static int idpf_copy_data_to_vc_buf(struct idpf_adapter *adapter, static void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr)
struct idpf_vport *vport,
struct idpf_ctlq_msg *ctlq_msg,
enum idpf_vport_vc_state err_enum)
{ {
if (ctlq_msg->cookie.mbx.chnl_retval) { int i;
if (vport)
set_bit(err_enum, vport->vc_state);
else
set_bit(err_enum, adapter->vc_state);
return -EINVAL; spin_lock_bh(&vcxn_mngr->xn_bm_lock);
} bitmap_zero(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
spin_unlock_bh(&vcxn_mngr->xn_bm_lock);
if (vport) for (i = 0; i < ARRAY_SIZE(vcxn_mngr->ring); i++) {
memcpy(vport->vc_msg, ctlq_msg->ctx.indirect.payload->va, struct idpf_vc_xn *xn = &vcxn_mngr->ring[i];
min_t(int, ctlq_msg->ctx.indirect.payload->size,
IDPF_CTLQ_MAX_BUF_LEN));
else
memcpy(adapter->vc_msg, ctlq_msg->ctx.indirect.payload->va,
min_t(int, ctlq_msg->ctx.indirect.payload->size,
IDPF_CTLQ_MAX_BUF_LEN));
return 0; idpf_vc_xn_lock(xn);
xn->state = IDPF_VC_XN_SHUTDOWN;
idpf_vc_xn_release_bufs(xn);
idpf_vc_xn_unlock(xn);
complete_all(&xn->completed);
}
} }
/** /**
* idpf_recv_vchnl_op - helper function with common logic when handling the * idpf_vc_xn_pop_free - Pop a free transaction from free list
* reception of VIRTCHNL OPs. * @vcxn_mngr: transaction manager to pop from
* @adapter: driver specific private structure *
* @vport: virtual port structure * Returns NULL if no free transactions
* @ctlq_msg: msg to copy from
* @state: state bit used on timeout check
* @err_state: err bit to set on error
*/ */
static void idpf_recv_vchnl_op(struct idpf_adapter *adapter, static
struct idpf_vport *vport, struct idpf_vc_xn *idpf_vc_xn_pop_free(struct idpf_vc_xn_manager *vcxn_mngr)
struct idpf_ctlq_msg *ctlq_msg,
enum idpf_vport_vc_state state,
enum idpf_vport_vc_state err_state)
{ {
wait_queue_head_t *vchnl_wq; struct idpf_vc_xn *xn = NULL;
int err; unsigned long free_idx;
if (vport) spin_lock_bh(&vcxn_mngr->xn_bm_lock);
vchnl_wq = &vport->vchnl_wq; free_idx = find_first_bit(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
else if (free_idx == IDPF_VC_XN_RING_LEN)
vchnl_wq = &adapter->vchnl_wq; goto do_unlock;
err = idpf_copy_data_to_vc_buf(adapter, vport, ctlq_msg, err_state); clear_bit(free_idx, vcxn_mngr->free_xn_bm);
if (wq_has_sleeper(vchnl_wq)) { xn = &vcxn_mngr->ring[free_idx];
if (vport) xn->salt = vcxn_mngr->salt++;
set_bit(state, vport->vc_state);
else
set_bit(state, adapter->vc_state);
wake_up(vchnl_wq); do_unlock:
} else { spin_unlock_bh(&vcxn_mngr->xn_bm_lock);
if (!err) {
dev_warn(&adapter->pdev->dev, "opcode %d received without waiting thread\n", return xn;
ctlq_msg->cookie.mbx.chnl_opcode);
} else {
/* Clear the errors since there is no sleeper to pass
* them on
*/
if (vport)
clear_bit(err_state, vport->vc_state);
else
clear_bit(err_state, adapter->vc_state);
}
}
} }
/** /**
* idpf_recv_mb_msg - Receive message over mailbox * idpf_vc_xn_push_free - Push a free transaction to free list
* @adapter: Driver specific private structure * @vcxn_mngr: transaction manager to push to
* @op: virtchannel operation code * @xn: transaction to push
* @msg: Received message holding buffer
* @msg_size: message size
*
* Will receive control queue message and posts the receive buffer. Returns 0
* on success and negative on failure.
*/ */
int idpf_recv_mb_msg(struct idpf_adapter *adapter, u32 op, static void idpf_vc_xn_push_free(struct idpf_vc_xn_manager *vcxn_mngr,
void *msg, int msg_size) struct idpf_vc_xn *xn)
{ {
struct idpf_vport *vport = NULL; idpf_vc_xn_release_bufs(xn);
struct idpf_ctlq_msg ctlq_msg; set_bit(xn->idx, vcxn_mngr->free_xn_bm);
struct idpf_dma_mem *dma_mem; }
bool work_done = false;
int num_retry = 2000;
u16 num_q_msg;
int err;
while (1) {
struct idpf_vport_config *vport_config;
int payload_size = 0;
/* Try to get one message */
num_q_msg = 1;
dma_mem = NULL;
err = idpf_ctlq_recv(adapter->hw.arq, &num_q_msg, &ctlq_msg);
/* If no message then decide if we have to retry based on
* opcode
*/
if (err || !num_q_msg) {
/* Increasing num_retry to consider the delayed
* responses because of large number of VF's mailbox
* messages. If the mailbox message is received from
* the other side, we come out of the sleep cycle
* immediately else we wait for more time.
*/
if (!op || !num_retry--)
break;
if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) {
err = -EIO;
break;
}
msleep(20);
continue;
}
/* If we are here a message is received. Check if we are looking /**
* for a specific message based on opcode. If it is different * idpf_vc_xn_exec - Perform a send/recv virtchnl transaction
* ignore and post buffers * @adapter: driver specific private structure with vcxn_mngr
* @params: parameters for this particular transaction including
* -vc_op: virtchannel operation to send
* -send_buf: kvec iov for send buf and len
* -recv_buf: kvec iov for recv buf and len (ignored if NULL)
* -timeout_ms: timeout waiting for a reply (milliseconds)
* -async: don't wait for message reply, will lose caller context
* -async_handler: callback to handle async replies
*
* @returns >= 0 for success, the size of the initial reply (may or may not be
* >= @recv_buf.iov_len, but we never overflow @@recv_buf_iov_base). < 0 for
* error.
*/
static ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,
const struct idpf_vc_xn_params *params)
{
const struct kvec *send_buf = &params->send_buf;
struct idpf_vc_xn *xn;
ssize_t retval;
u16 cookie;
xn = idpf_vc_xn_pop_free(adapter->vcxn_mngr);
/* no free transactions available */
if (!xn)
return -ENOSPC;
idpf_vc_xn_lock(xn);
if (xn->state == IDPF_VC_XN_SHUTDOWN) {
retval = -ENXIO;
goto only_unlock;
} else if (xn->state != IDPF_VC_XN_IDLE) {
/* We're just going to clobber this transaction even though
* it's not IDLE. If we don't reuse it we could theoretically
* eventually leak all the free transactions and not be able to
* send any messages. At least this way we make an attempt to
* remain functional even though something really bad is
* happening that's corrupting what was supposed to be free
* transactions.
*/ */
if (op && ctlq_msg.cookie.mbx.chnl_opcode != op) WARN_ONCE(1, "There should only be idle transactions in free list (idx %d op %d)\n",
goto post_buffs; xn->idx, xn->vc_op);
}
err = idpf_find_vport(adapter, &vport, &ctlq_msg); xn->reply = params->recv_buf;
if (err) xn->reply_sz = 0;
goto post_buffs; xn->state = params->async ? IDPF_VC_XN_ASYNC : IDPF_VC_XN_WAITING;
xn->vc_op = params->vc_op;
xn->async_handler = params->async_handler;
idpf_vc_xn_unlock(xn);
if (ctlq_msg.data_len) if (!params->async)
payload_size = ctlq_msg.ctx.indirect.payload->size; reinit_completion(&xn->completed);
cookie = FIELD_PREP(IDPF_VC_XN_SALT_M, xn->salt) |
FIELD_PREP(IDPF_VC_XN_IDX_M, xn->idx);
/* All conditions are met. Either a message requested is retval = idpf_send_mb_msg(adapter, params->vc_op,
* received or we received a message to be processed send_buf->iov_len, send_buf->iov_base,
*/ cookie);
switch (ctlq_msg.cookie.mbx.chnl_opcode) { if (retval) {
case VIRTCHNL2_OP_VERSION: idpf_vc_xn_lock(xn);
case VIRTCHNL2_OP_GET_CAPS: goto release_and_unlock;
if (ctlq_msg.cookie.mbx.chnl_retval) { }
dev_err(&adapter->pdev->dev, "Failure initializing, vc op: %u retval: %u\n",
ctlq_msg.cookie.mbx.chnl_opcode,
ctlq_msg.cookie.mbx.chnl_retval);
err = -EBADMSG;
} else if (msg) {
memcpy(msg, ctlq_msg.ctx.indirect.payload->va,
min_t(int, payload_size, msg_size));
}
work_done = true;
break;
case VIRTCHNL2_OP_CREATE_VPORT:
idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg,
IDPF_VC_CREATE_VPORT,
IDPF_VC_CREATE_VPORT_ERR);
break;
case VIRTCHNL2_OP_ENABLE_VPORT:
idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
IDPF_VC_ENA_VPORT,
IDPF_VC_ENA_VPORT_ERR);
break;
case VIRTCHNL2_OP_DISABLE_VPORT:
idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
IDPF_VC_DIS_VPORT,
IDPF_VC_DIS_VPORT_ERR);
break;
case VIRTCHNL2_OP_DESTROY_VPORT:
idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
IDPF_VC_DESTROY_VPORT,
IDPF_VC_DESTROY_VPORT_ERR);
break;
case VIRTCHNL2_OP_CONFIG_TX_QUEUES:
idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
IDPF_VC_CONFIG_TXQ,
IDPF_VC_CONFIG_TXQ_ERR);
break;
case VIRTCHNL2_OP_CONFIG_RX_QUEUES:
idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
IDPF_VC_CONFIG_RXQ,
IDPF_VC_CONFIG_RXQ_ERR);
break;
case VIRTCHNL2_OP_ENABLE_QUEUES:
idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
IDPF_VC_ENA_QUEUES,
IDPF_VC_ENA_QUEUES_ERR);
break;
case VIRTCHNL2_OP_DISABLE_QUEUES:
idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
IDPF_VC_DIS_QUEUES,
IDPF_VC_DIS_QUEUES_ERR);
break;
case VIRTCHNL2_OP_ADD_QUEUES:
idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
IDPF_VC_ADD_QUEUES,
IDPF_VC_ADD_QUEUES_ERR);
break;
case VIRTCHNL2_OP_DEL_QUEUES:
idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
IDPF_VC_DEL_QUEUES,
IDPF_VC_DEL_QUEUES_ERR);
break;
case VIRTCHNL2_OP_MAP_QUEUE_VECTOR:
idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
IDPF_VC_MAP_IRQ,
IDPF_VC_MAP_IRQ_ERR);
break;
case VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR:
idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
IDPF_VC_UNMAP_IRQ,
IDPF_VC_UNMAP_IRQ_ERR);
break;
case VIRTCHNL2_OP_GET_STATS:
idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
IDPF_VC_GET_STATS,
IDPF_VC_GET_STATS_ERR);
break;
case VIRTCHNL2_OP_GET_RSS_LUT:
idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
IDPF_VC_GET_RSS_LUT,
IDPF_VC_GET_RSS_LUT_ERR);
break;
case VIRTCHNL2_OP_SET_RSS_LUT:
idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
IDPF_VC_SET_RSS_LUT,
IDPF_VC_SET_RSS_LUT_ERR);
break;
case VIRTCHNL2_OP_GET_RSS_KEY:
idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
IDPF_VC_GET_RSS_KEY,
IDPF_VC_GET_RSS_KEY_ERR);
break;
case VIRTCHNL2_OP_SET_RSS_KEY:
idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
IDPF_VC_SET_RSS_KEY,
IDPF_VC_SET_RSS_KEY_ERR);
break;
case VIRTCHNL2_OP_SET_SRIOV_VFS:
idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg,
IDPF_VC_SET_SRIOV_VFS,
IDPF_VC_SET_SRIOV_VFS_ERR);
break;
case VIRTCHNL2_OP_ALLOC_VECTORS:
idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg,
IDPF_VC_ALLOC_VECTORS,
IDPF_VC_ALLOC_VECTORS_ERR);
break;
case VIRTCHNL2_OP_DEALLOC_VECTORS:
idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg,
IDPF_VC_DEALLOC_VECTORS,
IDPF_VC_DEALLOC_VECTORS_ERR);
break;
case VIRTCHNL2_OP_GET_PTYPE_INFO:
idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg,
IDPF_VC_GET_PTYPE_INFO,
IDPF_VC_GET_PTYPE_INFO_ERR);
break;
case VIRTCHNL2_OP_LOOPBACK:
idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
IDPF_VC_LOOPBACK_STATE,
IDPF_VC_LOOPBACK_STATE_ERR);
break;
case VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE:
/* This message can only be sent asynchronously. As
* such we'll have lost the context in which it was
* called and thus can only really report if it looks
* like an error occurred. Don't bother setting ERR bit
* or waking chnl_wq since no work queue will be waiting
* to read the message.
*/
if (ctlq_msg.cookie.mbx.chnl_retval) {
dev_err(&adapter->pdev->dev, "Failed to set promiscuous mode: %d\n",
ctlq_msg.cookie.mbx.chnl_retval);
}
break;
case VIRTCHNL2_OP_ADD_MAC_ADDR:
vport_config = adapter->vport_config[vport->idx];
if (test_and_clear_bit(IDPF_VPORT_ADD_MAC_REQ,
vport_config->flags)) {
/* Message was sent asynchronously. We don't
* normally print errors here, instead
* prefer to handle errors in the function
* calling wait_for_event. However, if
* asynchronous, the context in which the
* message was sent is lost. We can't really do
* anything about at it this point, but we
* should at a minimum indicate that it looks
* like something went wrong. Also don't bother
* setting ERR bit or waking vchnl_wq since no
* one will be waiting to read the async
* message.
*/
if (ctlq_msg.cookie.mbx.chnl_retval)
dev_err(&adapter->pdev->dev, "Failed to add MAC address: %d\n",
ctlq_msg.cookie.mbx.chnl_retval);
break;
}
idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
IDPF_VC_ADD_MAC_ADDR,
IDPF_VC_ADD_MAC_ADDR_ERR);
break;
case VIRTCHNL2_OP_DEL_MAC_ADDR:
vport_config = adapter->vport_config[vport->idx];
if (test_and_clear_bit(IDPF_VPORT_DEL_MAC_REQ,
vport_config->flags)) {
/* Message was sent asynchronously like the
* VIRTCHNL2_OP_ADD_MAC_ADDR
*/
if (ctlq_msg.cookie.mbx.chnl_retval)
dev_err(&adapter->pdev->dev, "Failed to delete MAC address: %d\n",
ctlq_msg.cookie.mbx.chnl_retval);
break;
}
idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
IDPF_VC_DEL_MAC_ADDR,
IDPF_VC_DEL_MAC_ADDR_ERR);
break;
case VIRTCHNL2_OP_EVENT:
idpf_recv_event_msg(vport, &ctlq_msg);
break;
default:
dev_warn(&adapter->pdev->dev,
"Unhandled virtchnl response %d\n",
ctlq_msg.cookie.mbx.chnl_opcode);
break;
}
post_buffs: if (params->async)
if (ctlq_msg.data_len) return 0;
dma_mem = ctlq_msg.ctx.indirect.payload;
else
num_q_msg = 0;
err = idpf_ctlq_post_rx_buffs(&adapter->hw, adapter->hw.arq, wait_for_completion_timeout(&xn->completed,
&num_q_msg, &dma_mem); msecs_to_jiffies(params->timeout_ms));
/* If post failed clear the only buffer we supplied */
if (err && dma_mem)
dma_free_coherent(&adapter->pdev->dev, dma_mem->size,
dma_mem->va, dma_mem->pa);
/* Applies only if we are looking for a specific opcode */ /* No need to check the return value; we check the final state of the
if (work_done) * transaction below. It's possible the transaction actually gets more
break; * timeout than specified if we get preempted here but after
* wait_for_completion_timeout returns. This should be non-issue
* however.
*/
idpf_vc_xn_lock(xn);
switch (xn->state) {
case IDPF_VC_XN_SHUTDOWN:
retval = -ENXIO;
goto only_unlock;
case IDPF_VC_XN_WAITING:
dev_notice_ratelimited(&adapter->pdev->dev, "Transaction timed-out (op %d, %dms)\n",
params->vc_op, params->timeout_ms);
retval = -ETIME;
break;
case IDPF_VC_XN_COMPLETED_SUCCESS:
retval = xn->reply_sz;
break;
case IDPF_VC_XN_COMPLETED_FAILED:
dev_notice_ratelimited(&adapter->pdev->dev, "Transaction failed (op %d)\n",
params->vc_op);
retval = -EIO;
break;
default:
/* Invalid state. */
WARN_ON_ONCE(1);
retval = -EIO;
break;
} }
return err; release_and_unlock:
idpf_vc_xn_push_free(adapter->vcxn_mngr, xn);
/* If we receive a VC reply after here, it will be dropped. */
only_unlock:
idpf_vc_xn_unlock(xn);
return retval;
} }
-/**
- * __idpf_wait_for_event - wrapper function for wait on virtchannel response
- * @adapter: Driver private data structure
- * @vport: virtual port structure
- * @state: check on state upon timeout
- * @err_check: check if this specific error bit is set
- * @timeout: Max time to wait
- *
- * Checks if state is set upon expiry of timeout. Returns 0 on success,
- * negative on failure.
- */
-static int __idpf_wait_for_event(struct idpf_adapter *adapter,
-				 struct idpf_vport *vport,
-				 enum idpf_vport_vc_state state,
-				 enum idpf_vport_vc_state err_check,
-				 int timeout)
-{
-	int time_to_wait, num_waits;
-	wait_queue_head_t *vchnl_wq;
-	unsigned long *vc_state;
-
-	time_to_wait = ((timeout <= IDPF_MAX_WAIT) ? timeout : IDPF_MAX_WAIT);
-	num_waits = ((timeout <= IDPF_MAX_WAIT) ? 1 : timeout / IDPF_MAX_WAIT);
-
-	if (vport) {
-		vchnl_wq = &vport->vchnl_wq;
-		vc_state = vport->vc_state;
-	} else {
-		vchnl_wq = &adapter->vchnl_wq;
-		vc_state = adapter->vc_state;
-	}
-
-	while (num_waits) {
-		int event;
-
-		/* If we are here and a reset is detected do not wait but
-		 * return. Reset timing is out of drivers control. So
-		 * while we are cleaning resources as part of reset if the
-		 * underlying HW mailbox is gone, wait on mailbox messages
-		 * is not meaningful
-		 */
-		if (idpf_is_reset_detected(adapter))
-			return 0;
-
-		event = wait_event_timeout(*vchnl_wq,
-					   test_and_clear_bit(state, vc_state),
-					   msecs_to_jiffies(time_to_wait));
-		if (event) {
-			if (test_and_clear_bit(err_check, vc_state)) {
-				dev_err(&adapter->pdev->dev, "VC response error %s\n",
-					idpf_vport_vc_state_str[err_check]);
-
-				return -EINVAL;
-			}
-
-			return 0;
-		}
-		num_waits--;
-	}
-
-	/* Timeout occurred */
-	dev_err(&adapter->pdev->dev, "VC timeout, state = %s\n",
-		idpf_vport_vc_state_str[state]);
-
-	return -ETIMEDOUT;
-}
-
-/**
- * idpf_min_wait_for_event - wait for virtchannel response
- * @adapter: Driver private data structure
- * @vport: virtual port structure
- * @state: check on state upon timeout
- * @err_check: check if this specific error bit is set
- *
- * Returns 0 on success, negative on failure.
- */
-static int idpf_min_wait_for_event(struct idpf_adapter *adapter,
-				   struct idpf_vport *vport,
-				   enum idpf_vport_vc_state state,
-				   enum idpf_vport_vc_state err_check)
-{
-	return __idpf_wait_for_event(adapter, vport, state, err_check,
-				     IDPF_WAIT_FOR_EVENT_TIMEO_MIN);
-}
-
-/**
- * idpf_wait_for_event - wait for virtchannel response
- * @adapter: Driver private data structure
- * @vport: virtual port structure
- * @state: check on state upon timeout after 500ms
- * @err_check: check if this specific error bit is set
- *
- * Returns 0 on success, negative on failure.
- */
-static int idpf_wait_for_event(struct idpf_adapter *adapter,
-			       struct idpf_vport *vport,
-			       enum idpf_vport_vc_state state,
-			       enum idpf_vport_vc_state err_check)
-{
-	/* Increasing the timeout in __IDPF_INIT_SW flow to consider large
-	 * number of VF's mailbox message responses. When a message is received
-	 * on mailbox, this thread is woken up by the idpf_recv_mb_msg before
-	 * the timeout expires. Only in the error case i.e. if no message is
-	 * received on mailbox, we wait for the complete timeout which is
-	 * less likely to happen.
-	 */
-	return __idpf_wait_for_event(adapter, vport, state, err_check,
-				     IDPF_WAIT_FOR_EVENT_TIMEO);
-}
+/**
+ * idpf_vc_xn_forward_async - Handle async reply receives
+ * @adapter: private data struct
+ * @xn: transaction to handle
+ * @ctlq_msg: corresponding ctlq_msg
+ *
+ * For async sends we're going to lose the caller's context so, if an
+ * async_handler was provided, it can deal with the reply, otherwise we'll just
+ * check and report if there is an error.
+ */
+static int
+idpf_vc_xn_forward_async(struct idpf_adapter *adapter, struct idpf_vc_xn *xn,
+			 const struct idpf_ctlq_msg *ctlq_msg)
+{
+	int err = 0;
+
+	if (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) {
+		dev_err_ratelimited(&adapter->pdev->dev, "Async message opcode does not match transaction opcode (msg: %d) (xn: %d)\n",
+				    ctlq_msg->cookie.mbx.chnl_opcode, xn->vc_op);
+		xn->reply_sz = 0;
+		err = -EINVAL;
+		goto release_bufs;
+	}
+
+	if (xn->async_handler) {
+		err = xn->async_handler(adapter, xn, ctlq_msg);
+		goto release_bufs;
+	}
+
+	if (ctlq_msg->cookie.mbx.chnl_retval) {
+		xn->reply_sz = 0;
+		dev_err_ratelimited(&adapter->pdev->dev, "Async message failure (op %d)\n",
+				    ctlq_msg->cookie.mbx.chnl_opcode);
+		err = -EINVAL;
+	}
+
+release_bufs:
+	idpf_vc_xn_push_free(adapter->vcxn_mngr, xn);
+
+	return err;
+}
+
+/**
+ * idpf_vc_xn_forward_reply - copy a reply back to receiving thread
+ * @adapter: driver specific private structure with vcxn_mngr
+ * @ctlq_msg: controlq message to send back to receiving thread
+ */
+static int
+idpf_vc_xn_forward_reply(struct idpf_adapter *adapter,
+			 const struct idpf_ctlq_msg *ctlq_msg)
+{
+	const void *payload = NULL;
+	size_t payload_size = 0;
+	struct idpf_vc_xn *xn;
+	u16 msg_info;
+	int err = 0;
+	u16 xn_idx;
+	u16 salt;
+
+	msg_info = ctlq_msg->ctx.sw_cookie.data;
+	xn_idx = FIELD_GET(IDPF_VC_XN_IDX_M, msg_info);
+	if (xn_idx >= ARRAY_SIZE(adapter->vcxn_mngr->ring)) {
+		dev_err_ratelimited(&adapter->pdev->dev, "Out of bounds cookie received: %02x\n",
+				    xn_idx);
+		return -EINVAL;
+	}
+	xn = &adapter->vcxn_mngr->ring[xn_idx];
+	salt = FIELD_GET(IDPF_VC_XN_SALT_M, msg_info);
+	if (xn->salt != salt) {
+		dev_err_ratelimited(&adapter->pdev->dev, "Transaction salt does not match (%02x != %02x)\n",
+				    xn->salt, salt);
+		return -EINVAL;
+	}
+
+	idpf_vc_xn_lock(xn);
+	switch (xn->state) {
+	case IDPF_VC_XN_WAITING:
+		/* success */
+		break;
+	case IDPF_VC_XN_IDLE:
+		dev_err_ratelimited(&adapter->pdev->dev, "Unexpected or belated VC reply (op %d)\n",
+				    ctlq_msg->cookie.mbx.chnl_opcode);
+		err = -EINVAL;
+		goto out_unlock;
+	case IDPF_VC_XN_SHUTDOWN:
+		/* ENXIO is a bit special here as the recv msg loop uses that
+		 * to know if it should stop trying to clean the ring if we
+		 * lost the virtchnl. We need to stop playing with registers
+		 * and yield.
+		 */
+		err = -ENXIO;
+		goto out_unlock;
+	case IDPF_VC_XN_ASYNC:
+		err = idpf_vc_xn_forward_async(adapter, xn, ctlq_msg);
+		idpf_vc_xn_unlock(xn);
+		return err;
+	default:
+		dev_err_ratelimited(&adapter->pdev->dev, "Overwriting VC reply (op %d)\n",
+				    ctlq_msg->cookie.mbx.chnl_opcode);
+		err = -EBUSY;
+		goto out_unlock;
+	}
+
+	if (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) {
+		dev_err_ratelimited(&adapter->pdev->dev, "Message opcode does not match transaction opcode (msg: %d) (xn: %d)\n",
+				    ctlq_msg->cookie.mbx.chnl_opcode, xn->vc_op);
+		xn->reply_sz = 0;
+		xn->state = IDPF_VC_XN_COMPLETED_FAILED;
+		err = -EINVAL;
+		goto out_unlock;
+	}
+
+	if (ctlq_msg->cookie.mbx.chnl_retval) {
+		xn->reply_sz = 0;
+		xn->state = IDPF_VC_XN_COMPLETED_FAILED;
+		err = -EINVAL;
+		goto out_unlock;
+	}
+
+	if (ctlq_msg->data_len) {
+		payload = ctlq_msg->ctx.indirect.payload->va;
+		payload_size = ctlq_msg->ctx.indirect.payload->size;
+	}
+
+	xn->reply_sz = payload_size;
+	xn->state = IDPF_VC_XN_COMPLETED_SUCCESS;
+
+	if (xn->reply.iov_base && xn->reply.iov_len && payload_size)
+		memcpy(xn->reply.iov_base, payload,
+		       min_t(size_t, xn->reply.iov_len, payload_size));
+
+out_unlock:
+	idpf_vc_xn_unlock(xn);
+	/* we _cannot_ hold lock while calling complete */
+	complete(&xn->completed);
+
+	return err;
+}
+
+/**
+ * idpf_recv_mb_msg - Receive message over mailbox
+ * @adapter: Driver specific private structure
+ *
+ * Will receive control queue message and posts the receive buffer. Returns 0
+ * on success and negative on failure.
+ */
+int idpf_recv_mb_msg(struct idpf_adapter *adapter)
+{
+	struct idpf_ctlq_msg ctlq_msg;
+	struct idpf_dma_mem *dma_mem;
+	int post_err, err;
+	u16 num_recv;
+
+	while (1) {
+		/* This will get <= num_recv messages and output how many
+		 * actually received on num_recv.
+		 */
+		num_recv = 1;
+		err = idpf_ctlq_recv(adapter->hw.arq, &num_recv, &ctlq_msg);
+		if (err || !num_recv)
+			break;
+
+		if (ctlq_msg.data_len) {
+			dma_mem = ctlq_msg.ctx.indirect.payload;
+		} else {
+			dma_mem = NULL;
+			num_recv = 0;
+		}
+
+		if (ctlq_msg.cookie.mbx.chnl_opcode == VIRTCHNL2_OP_EVENT)
+			idpf_recv_event_msg(adapter, &ctlq_msg);
+		else
+			err = idpf_vc_xn_forward_reply(adapter, &ctlq_msg);
+
+		post_err = idpf_ctlq_post_rx_buffs(&adapter->hw,
+						   adapter->hw.arq,
+						   &num_recv, &dma_mem);
+
+		/* If post failed clear the only buffer we supplied */
+		if (post_err) {
+			if (dma_mem)
+				dmam_free_coherent(&adapter->pdev->dev,
+						   dma_mem->size, dma_mem->va,
+						   dma_mem->pa);
+			break;
+		}
+
+		/* virtchnl trying to shutdown, stop cleaning */
+		if (err == -ENXIO)
+			break;
+	}
+
+	return err;
+}
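Since idpf_vc_xn_forward_async() above simply invokes xn->async_handler(adapter, xn, ctlq_msg) when one was provided, an async sender supplies a callback of that shape. A minimal sketch, assuming only the call signature visible above; the function name and body are illustrative:

/* Illustrative async reply handler: the (adapter, xn, ctlq_msg)
 * parameters and int return are taken from the call site in
 * idpf_vc_xn_forward_async(); a real handler would inspect the reply
 * and update driver state accordingly.
 */
static int example_async_handler(struct idpf_adapter *adapter,
				 struct idpf_vc_xn *xn,
				 const struct idpf_ctlq_msg *ctlq_msg)
{
	if (ctlq_msg->cookie.mbx.chnl_retval) {
		dev_warn(&adapter->pdev->dev, "Async op %d failed: %d\n",
			 ctlq_msg->cookie.mbx.chnl_opcode,
			 ctlq_msg->cookie.mbx.chnl_retval);
		return -EINVAL;
	}

	return 0;
}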
/**
@@ -785,7 +776,11 @@ static int idpf_wait_for_marker_event(struct idpf_vport *vport)
 */
static int idpf_send_ver_msg(struct idpf_adapter *adapter)
{
+	struct idpf_vc_xn_params xn_params = {};
	struct virtchnl2_version_info vvi;
+	ssize_t reply_sz;
+	u32 major, minor;
+	int err = 0;

	if (adapter->virt_ver_maj) {
		vvi.major = cpu_to_le32(adapter->virt_ver_maj);
@@ -795,43 +790,29 @@ static int idpf_send_ver_msg(struct idpf_adapter *adapter)
		vvi.minor = cpu_to_le32(IDPF_VIRTCHNL_VERSION_MINOR);
	}

-	return idpf_send_mb_msg(adapter, VIRTCHNL2_OP_VERSION, sizeof(vvi),
-				(u8 *)&vvi);
-}
-
-/**
- * idpf_recv_ver_msg - Receive virtchnl version message
- * @adapter: Driver specific private structure
- *
- * Receive virtchnl version message. Returns 0 on success, -EAGAIN if we need
- * to send version message again, otherwise negative on failure.
- */
-static int idpf_recv_ver_msg(struct idpf_adapter *adapter)
-{
-	struct virtchnl2_version_info vvi;
-	u32 major, minor;
-	int err;
-
-	err = idpf_recv_mb_msg(adapter, VIRTCHNL2_OP_VERSION, &vvi,
-			       sizeof(vvi));
-	if (err)
-		return err;
+	xn_params.vc_op = VIRTCHNL2_OP_VERSION;
+	xn_params.send_buf.iov_base = &vvi;
+	xn_params.send_buf.iov_len = sizeof(vvi);
+	xn_params.recv_buf = xn_params.send_buf;
+	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+
+	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+	if (reply_sz < 0)
+		return reply_sz;
+	if (reply_sz < sizeof(vvi))
+		return -EIO;

	major = le32_to_cpu(vvi.major);
	minor = le32_to_cpu(vvi.minor);

	if (major > IDPF_VIRTCHNL_VERSION_MAJOR) {
-		dev_warn(&adapter->pdev->dev,
-			 "Virtchnl major version (%d) greater than supported\n",
-			 major);
+		dev_warn(&adapter->pdev->dev, "Virtchnl major version greater than supported\n");
		return -EINVAL;
	}

	if (major == IDPF_VIRTCHNL_VERSION_MAJOR &&
	    minor > IDPF_VIRTCHNL_VERSION_MINOR)
-		dev_warn(&adapter->pdev->dev,
-			 "Virtchnl minor version (%d) didn't match\n", minor);
+		dev_warn(&adapter->pdev->dev, "Virtchnl minor version didn't match\n");

	/* If we have a mismatch, resend version to update receiver on what
	 * version we will use.
@@ -856,7 +837,9 @@ static int idpf_recv_ver_msg(struct idpf_adapter *adapter)
 */
static int idpf_send_get_caps_msg(struct idpf_adapter *adapter)
{
-	struct virtchnl2_get_capabilities caps = { };
+	struct virtchnl2_get_capabilities caps = {};
+	struct idpf_vc_xn_params xn_params = {};
+	ssize_t reply_sz;

	caps.csum_caps =
		cpu_to_le32(VIRTCHNL2_CAP_TX_CSUM_L3_IPV4 |
@@ -913,21 +896,20 @@ static int idpf_send_get_caps_msg(struct idpf_adapter *adapter)
			    VIRTCHNL2_CAP_PROMISC |
			    VIRTCHNL2_CAP_LOOPBACK);

-	return idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_CAPS, sizeof(caps),
-				(u8 *)&caps);
-}
-
-/**
- * idpf_recv_get_caps_msg - Receive virtchnl get capabilities message
- * @adapter: Driver specific private structure
- *
- * Receive virtchnl get capabilities message. Returns 0 on success, negative on
- * failure.
- */
-static int idpf_recv_get_caps_msg(struct idpf_adapter *adapter)
-{
-	return idpf_recv_mb_msg(adapter, VIRTCHNL2_OP_GET_CAPS, &adapter->caps,
-				sizeof(struct virtchnl2_get_capabilities));
+	xn_params.vc_op = VIRTCHNL2_OP_GET_CAPS;
+	xn_params.send_buf.iov_base = &caps;
+	xn_params.send_buf.iov_len = sizeof(caps);
+	xn_params.recv_buf.iov_base = &adapter->caps;
+	xn_params.recv_buf.iov_len = sizeof(adapter->caps);
+	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+
+	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+	if (reply_sz < 0)
+		return reply_sz;
+	if (reply_sz < sizeof(adapter->caps))
+		return -EIO;
+
+	return 0;
}
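idpf_send_ver_msg() and idpf_send_get_caps_msg() above show the shape every converted sender now takes: fill xn_params and let idpf_vc_xn_exec() do the send, wait, and reply copy. A minimal sketch of that pattern, with a placeholder opcode and payload (not a real driver function):

/* Minimal sketch of the converted send pattern; the opcode and payload
 * type are placeholders, real callers differ only in buffers, opcode,
 * and timeout.
 */
static int example_send_simple_msg(struct idpf_adapter *adapter)
{
	struct idpf_vc_xn_params xn_params = {};
	struct virtchnl2_vport v_id = {};	/* placeholder payload */
	ssize_t reply_sz;

	xn_params.vc_op = VIRTCHNL2_OP_VERSION;	/* placeholder opcode */
	xn_params.send_buf.iov_base = &v_id;
	xn_params.send_buf.iov_len = sizeof(v_id);
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;

	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);

	/* reply_sz is the reply size on success, negative errno on error */
	return reply_sz < 0 ? reply_sz : 0;
}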
/**
@@ -1254,8 +1236,10 @@ int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
			       struct idpf_vport_max_q *max_q)
{
	struct virtchnl2_create_vport *vport_msg;
+	struct idpf_vc_xn_params xn_params = {};
	u16 idx = adapter->next_vport;
	int err, buf_size;
+	ssize_t reply_sz;

	buf_size = sizeof(struct virtchnl2_create_vport);
	if (!adapter->vport_params_reqd[idx]) {
@@ -1286,35 +1270,38 @@ int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
		return err;
	}

-	mutex_lock(&adapter->vc_buf_lock);
-
-	err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_CREATE_VPORT, buf_size,
-			       (u8 *)vport_msg);
-	if (err)
-		goto rel_lock;
-
-	err = idpf_wait_for_event(adapter, NULL, IDPF_VC_CREATE_VPORT,
-				  IDPF_VC_CREATE_VPORT_ERR);
-	if (err) {
-		dev_err(&adapter->pdev->dev, "Failed to receive create vport message");
-		goto rel_lock;
-	}
-
	if (!adapter->vport_params_recvd[idx]) {
		adapter->vport_params_recvd[idx] = kzalloc(IDPF_CTLQ_MAX_BUF_LEN,
							   GFP_KERNEL);
		if (!adapter->vport_params_recvd[idx]) {
			err = -ENOMEM;
-			goto rel_lock;
+			goto free_vport_params;
		}
	}

-	vport_msg = adapter->vport_params_recvd[idx];
-	memcpy(vport_msg, adapter->vc_msg, IDPF_CTLQ_MAX_BUF_LEN);
+	xn_params.vc_op = VIRTCHNL2_OP_CREATE_VPORT;
+	xn_params.send_buf.iov_base = vport_msg;
+	xn_params.send_buf.iov_len = buf_size;
+	xn_params.recv_buf.iov_base = adapter->vport_params_recvd[idx];
+	xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
+	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+	if (reply_sz < 0) {
+		err = reply_sz;
+		goto free_vport_params;
+	}
+	if (reply_sz < IDPF_CTLQ_MAX_BUF_LEN) {
+		err = -EIO;
+		goto free_vport_params;
+	}
+
+	return 0;

-rel_lock:
-	mutex_unlock(&adapter->vc_buf_lock);
+free_vport_params:
+	kfree(adapter->vport_params_recvd[idx]);
+	adapter->vport_params_recvd[idx] = NULL;
+	kfree(adapter->vport_params_reqd[idx]);
+	adapter->vport_params_reqd[idx] = NULL;

	return err;
}
@@ -1366,26 +1353,19 @@ int idpf_check_supported_desc_ids(struct idpf_vport *vport)
 */
int idpf_send_destroy_vport_msg(struct idpf_vport *vport)
{
-	struct idpf_adapter *adapter = vport->adapter;
+	struct idpf_vc_xn_params xn_params = {};
	struct virtchnl2_vport v_id;
-	int err;
+	ssize_t reply_sz;

	v_id.vport_id = cpu_to_le32(vport->vport_id);

-	mutex_lock(&vport->vc_buf_lock);
-
-	err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DESTROY_VPORT,
-			       sizeof(v_id), (u8 *)&v_id);
-	if (err)
-		goto rel_lock;
-
-	err = idpf_min_wait_for_event(adapter, vport, IDPF_VC_DESTROY_VPORT,
-				      IDPF_VC_DESTROY_VPORT_ERR);
-
-rel_lock:
-	mutex_unlock(&vport->vc_buf_lock);
-
-	return err;
+	xn_params.vc_op = VIRTCHNL2_OP_DESTROY_VPORT;
+	xn_params.send_buf.iov_base = &v_id;
+	xn_params.send_buf.iov_len = sizeof(v_id);
+	xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+
+	return reply_sz < 0 ? reply_sz : 0;
}
/**
@@ -1397,26 +1377,19 @@ int idpf_send_destroy_vport_msg(struct idpf_vport *vport)
 */
int idpf_send_enable_vport_msg(struct idpf_vport *vport)
{
-	struct idpf_adapter *adapter = vport->adapter;
+	struct idpf_vc_xn_params xn_params = {};
	struct virtchnl2_vport v_id;
-	int err;
+	ssize_t reply_sz;

	v_id.vport_id = cpu_to_le32(vport->vport_id);

-	mutex_lock(&vport->vc_buf_lock);
-
-	err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_ENABLE_VPORT,
-			       sizeof(v_id), (u8 *)&v_id);
-	if (err)
-		goto rel_lock;
-
-	err = idpf_wait_for_event(adapter, vport, IDPF_VC_ENA_VPORT,
-				  IDPF_VC_ENA_VPORT_ERR);
-
-rel_lock:
-	mutex_unlock(&vport->vc_buf_lock);
-
-	return err;
+	xn_params.vc_op = VIRTCHNL2_OP_ENABLE_VPORT;
+	xn_params.send_buf.iov_base = &v_id;
+	xn_params.send_buf.iov_len = sizeof(v_id);
+	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+
+	return reply_sz < 0 ? reply_sz : 0;
}
/**
@@ -1428,26 +1401,19 @@ int idpf_send_enable_vport_msg(struct idpf_vport *vport)
 */
int idpf_send_disable_vport_msg(struct idpf_vport *vport)
{
-	struct idpf_adapter *adapter = vport->adapter;
+	struct idpf_vc_xn_params xn_params = {};
	struct virtchnl2_vport v_id;
-	int err;
+	ssize_t reply_sz;

	v_id.vport_id = cpu_to_le32(vport->vport_id);

-	mutex_lock(&vport->vc_buf_lock);
-
-	err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DISABLE_VPORT,
-			       sizeof(v_id), (u8 *)&v_id);
-	if (err)
-		goto rel_lock;
-
-	err = idpf_min_wait_for_event(adapter, vport, IDPF_VC_DIS_VPORT,
-				      IDPF_VC_DIS_VPORT_ERR);
-
-rel_lock:
-	mutex_unlock(&vport->vc_buf_lock);
-
-	return err;
+	xn_params.vc_op = VIRTCHNL2_OP_DISABLE_VPORT;
+	xn_params.send_buf.iov_base = &v_id;
+	xn_params.send_buf.iov_len = sizeof(v_id);
+	xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+
+	return reply_sz < 0 ? reply_sz : 0;
}
/**
@@ -1459,11 +1425,13 @@ int idpf_send_disable_vport_msg(struct idpf_vport *vport)
 */
static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
{
-	struct virtchnl2_config_tx_queues *ctq;
+	struct virtchnl2_config_tx_queues *ctq __free(kfree) = NULL;
+	struct virtchnl2_txq_info *qi __free(kfree) = NULL;
+	struct idpf_vc_xn_params xn_params = {};
	u32 config_sz, chunk_sz, buf_sz;
	int totqs, num_msgs, num_chunks;
-	struct virtchnl2_txq_info *qi;
-	int err = 0, i, k = 0;
+	ssize_t reply_sz;
+	int i, k = 0;

	totqs = vport->num_txq + vport->num_complq;
	qi = kcalloc(totqs, sizeof(struct virtchnl2_txq_info), GFP_KERNEL);
@@ -1524,10 +1492,8 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
	}

	/* Make sure accounting agrees */
-	if (k != totqs) {
-		err = -EINVAL;
-		goto error;
-	}
+	if (k != totqs)
+		return -EINVAL;

	/* Chunk up the queue contexts into multiple messages to avoid
	 * sending a control queue message buffer that is too large
@@ -1541,12 +1507,11 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
	buf_sz = struct_size(ctq, qinfo, num_chunks);

	ctq = kzalloc(buf_sz, GFP_KERNEL);
-	if (!ctq) {
-		err = -ENOMEM;
-		goto error;
-	}
+	if (!ctq)
+		return -ENOMEM;

-	mutex_lock(&vport->vc_buf_lock);
+	xn_params.vc_op = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
+	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;

	for (i = 0, k = 0; i < num_msgs; i++) {
		memset(ctq, 0, buf_sz);
@@ -1554,17 +1519,11 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
		ctq->num_qinfo = cpu_to_le16(num_chunks);
		memcpy(ctq->qinfo, &qi[k], chunk_sz * num_chunks);

-		err = idpf_send_mb_msg(vport->adapter,
-				       VIRTCHNL2_OP_CONFIG_TX_QUEUES,
-				       buf_sz, (u8 *)ctq);
-		if (err)
-			goto mbx_error;
-
-		err = idpf_wait_for_event(vport->adapter, vport,
-					  IDPF_VC_CONFIG_TXQ,
-					  IDPF_VC_CONFIG_TXQ_ERR);
-		if (err)
-			goto mbx_error;
+		xn_params.send_buf.iov_base = ctq;
+		xn_params.send_buf.iov_len = buf_sz;
+		reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+		if (reply_sz < 0)
+			return reply_sz;

		k += num_chunks;
		totqs -= num_chunks;
@@ -1573,13 +1532,7 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
		buf_sz = struct_size(ctq, qinfo, num_chunks);
	}

-mbx_error:
-	mutex_unlock(&vport->vc_buf_lock);
-	kfree(ctq);
-error:
-	kfree(qi);
-
-	return err;
+	return 0;
}
/**
@@ -1591,11 +1544,13 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
 */
static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport)
{
-	struct virtchnl2_config_rx_queues *crq;
+	struct virtchnl2_config_rx_queues *crq __free(kfree) = NULL;
+	struct virtchnl2_rxq_info *qi __free(kfree) = NULL;
+	struct idpf_vc_xn_params xn_params = {};
	u32 config_sz, chunk_sz, buf_sz;
	int totqs, num_msgs, num_chunks;
-	struct virtchnl2_rxq_info *qi;
-	int err = 0, i, k = 0;
+	ssize_t reply_sz;
+	int i, k = 0;

	totqs = vport->num_rxq + vport->num_bufq;
	qi = kcalloc(totqs, sizeof(struct virtchnl2_rxq_info), GFP_KERNEL);
@@ -1676,10 +1631,8 @@ static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport)
	}

	/* Make sure accounting agrees */
-	if (k != totqs) {
-		err = -EINVAL;
-		goto error;
-	}
+	if (k != totqs)
+		return -EINVAL;

	/* Chunk up the queue contexts into multiple messages to avoid
	 * sending a control queue message buffer that is too large
@@ -1693,12 +1646,11 @@ static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport)
	buf_sz = struct_size(crq, qinfo, num_chunks);

	crq = kzalloc(buf_sz, GFP_KERNEL);
-	if (!crq) {
-		err = -ENOMEM;
-		goto error;
-	}
+	if (!crq)
+		return -ENOMEM;

-	mutex_lock(&vport->vc_buf_lock);
+	xn_params.vc_op = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
+	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;

	for (i = 0, k = 0; i < num_msgs; i++) {
		memset(crq, 0, buf_sz);
@@ -1706,17 +1658,11 @@ static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport)
		crq->num_qinfo = cpu_to_le16(num_chunks);
		memcpy(crq->qinfo, &qi[k], chunk_sz * num_chunks);

-		err = idpf_send_mb_msg(vport->adapter,
-				       VIRTCHNL2_OP_CONFIG_RX_QUEUES,
-				       buf_sz, (u8 *)crq);
-		if (err)
-			goto mbx_error;
-
-		err = idpf_wait_for_event(vport->adapter, vport,
-					  IDPF_VC_CONFIG_RXQ,
-					  IDPF_VC_CONFIG_RXQ_ERR);
-		if (err)
-			goto mbx_error;
+		xn_params.send_buf.iov_base = crq;
+		xn_params.send_buf.iov_len = buf_sz;
+		reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+		if (reply_sz < 0)
+			return reply_sz;

		k += num_chunks;
		totqs -= num_chunks;
@@ -1725,42 +1671,28 @@ static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport)
		buf_sz = struct_size(crq, qinfo, num_chunks);
	}

-mbx_error:
-	mutex_unlock(&vport->vc_buf_lock);
-	kfree(crq);
-error:
-	kfree(qi);
-
-	return err;
+	return 0;
}
/**
 * idpf_send_ena_dis_queues_msg - Send virtchnl enable or disable
 * queues message
 * @vport: virtual port data structure
- * @vc_op: virtchnl op code to send
+ * @ena: if true enable, false disable
 *
 * Send enable or disable queues virtchnl message. Returns 0 on success,
 * negative on failure.
 */
-static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, u32 vc_op)
+static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool ena)
{
+	struct virtchnl2_del_ena_dis_queues *eq __free(kfree) = NULL;
+	struct virtchnl2_queue_chunk *qc __free(kfree) = NULL;
	u32 num_msgs, num_chunks, num_txq, num_rxq, num_q;
-	struct idpf_adapter *adapter = vport->adapter;
-	struct virtchnl2_del_ena_dis_queues *eq;
+	struct idpf_vc_xn_params xn_params = {};
	struct virtchnl2_queue_chunks *qcs;
-	struct virtchnl2_queue_chunk *qc;
	u32 config_sz, chunk_sz, buf_sz;
-	int i, j, k = 0, err = 0;
+	ssize_t reply_sz;
+	int i, j, k = 0;

-	/* validate virtchnl op */
-	switch (vc_op) {
-	case VIRTCHNL2_OP_ENABLE_QUEUES:
-	case VIRTCHNL2_OP_DISABLE_QUEUES:
-		break;
-	default:
-		return -EINVAL;
-	}

	num_txq = vport->num_txq + vport->num_complq;
	num_rxq = vport->num_rxq + vport->num_bufq;
@@ -1779,10 +1711,8 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, u32 vc_op)
			qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
		}
	}
-	if (vport->num_txq != k) {
-		err = -EINVAL;
-		goto error;
-	}
+	if (vport->num_txq != k)
+		return -EINVAL;

	if (!idpf_is_queue_model_split(vport->txq_model))
		goto setup_rx;
@@ -1794,10 +1724,8 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, u32 vc_op)
		qc[k].start_queue_id = cpu_to_le32(tx_qgrp->complq->q_id);
		qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
	}
-	if (vport->num_complq != (k - vport->num_txq)) {
-		err = -EINVAL;
-		goto error;
-	}
+	if (vport->num_complq != (k - vport->num_txq))
+		return -EINVAL;

setup_rx:
	for (i = 0; i < vport->num_rxq_grp; i++) {
@@ -1823,10 +1751,8 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, u32 vc_op)
			qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
		}
	}
-	if (vport->num_rxq != k - (vport->num_txq + vport->num_complq)) {
-		err = -EINVAL;
-		goto error;
-	}
+	if (vport->num_rxq != k - (vport->num_txq + vport->num_complq))
+		return -EINVAL;

	if (!idpf_is_queue_model_split(vport->rxq_model))
		goto send_msg;
@@ -1845,10 +1771,8 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, u32 vc_op)
	}
	if (vport->num_bufq != k - (vport->num_txq +
				    vport->num_complq +
-				    vport->num_rxq)) {
-		err = -EINVAL;
-		goto error;
-	}
+				    vport->num_rxq))
+		return -EINVAL;

send_msg:
	/* Chunk up the queue info into multiple messages */
@@ -1861,12 +1785,16 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, u32 vc_op)
	buf_sz = struct_size(eq, chunks.chunks, num_chunks);

	eq = kzalloc(buf_sz, GFP_KERNEL);
-	if (!eq) {
-		err = -ENOMEM;
-		goto error;
-	}
+	if (!eq)
+		return -ENOMEM;

-	mutex_lock(&vport->vc_buf_lock);
+	if (ena) {
+		xn_params.vc_op = VIRTCHNL2_OP_ENABLE_QUEUES;
+		xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+	} else {
+		xn_params.vc_op = VIRTCHNL2_OP_DISABLE_QUEUES;
+		xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+	}

	for (i = 0, k = 0; i < num_msgs; i++) {
		memset(eq, 0, buf_sz);
@@ -1875,20 +1803,11 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, u32 vc_op)
		qcs = &eq->chunks;
		memcpy(qcs->chunks, &qc[k], chunk_sz * num_chunks);

-		err = idpf_send_mb_msg(adapter, vc_op, buf_sz, (u8 *)eq);
-		if (err)
-			goto mbx_error;
-
-		if (vc_op == VIRTCHNL2_OP_ENABLE_QUEUES)
-			err = idpf_wait_for_event(adapter, vport,
-						  IDPF_VC_ENA_QUEUES,
-						  IDPF_VC_ENA_QUEUES_ERR);
-		else
-			err = idpf_min_wait_for_event(adapter, vport,
-						      IDPF_VC_DIS_QUEUES,
-						      IDPF_VC_DIS_QUEUES_ERR);
-		if (err)
-			goto mbx_error;
+		xn_params.send_buf.iov_base = eq;
+		xn_params.send_buf.iov_len = buf_sz;
+		reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+		if (reply_sz < 0)
+			return reply_sz;

		k += num_chunks;
		num_q -= num_chunks;
@@ -1897,13 +1816,7 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, u32 vc_op)
		buf_sz = struct_size(eq, chunks.chunks, num_chunks);
	}

-mbx_error:
-	mutex_unlock(&vport->vc_buf_lock);
-	kfree(eq);
-error:
-	kfree(qc);
-
-	return err;
+	return 0;
}
/**
@@ -1917,12 +1830,13 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, u32 vc_op)
 */
int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
{
-	struct idpf_adapter *adapter = vport->adapter;
-	struct virtchnl2_queue_vector_maps *vqvm;
-	struct virtchnl2_queue_vector *vqv;
+	struct virtchnl2_queue_vector_maps *vqvm __free(kfree) = NULL;
+	struct virtchnl2_queue_vector *vqv __free(kfree) = NULL;
+	struct idpf_vc_xn_params xn_params = {};
	u32 config_sz, chunk_sz, buf_sz;
	u32 num_msgs, num_chunks, num_q;
-	int i, j, k = 0, err = 0;
+	ssize_t reply_sz;
+	int i, j, k = 0;

	num_q = vport->num_txq + vport->num_rxq;
@@ -1952,10 +1866,8 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
		}
	}

-	if (vport->num_txq != k) {
-		err = -EINVAL;
-		goto error;
-	}
+	if (vport->num_txq != k)
+		return -EINVAL;

	for (i = 0; i < vport->num_rxq_grp; i++) {
		struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
@@ -1982,15 +1894,11 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
	}

	if (idpf_is_queue_model_split(vport->txq_model)) {
-		if (vport->num_rxq != k - vport->num_complq) {
-			err = -EINVAL;
-			goto error;
-		}
+		if (vport->num_rxq != k - vport->num_complq)
+			return -EINVAL;
	} else {
-		if (vport->num_rxq != k - vport->num_txq) {
-			err = -EINVAL;
-			goto error;
-		}
+		if (vport->num_rxq != k - vport->num_txq)
+			return -EINVAL;
	}

	/* Chunk up the vector info into multiple messages */
@@ -2003,39 +1911,28 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
	buf_sz = struct_size(vqvm, qv_maps, num_chunks);

	vqvm = kzalloc(buf_sz, GFP_KERNEL);
-	if (!vqvm) {
-		err = -ENOMEM;
-		goto error;
-	}
+	if (!vqvm)
+		return -ENOMEM;

-	mutex_lock(&vport->vc_buf_lock);
+	if (map) {
+		xn_params.vc_op = VIRTCHNL2_OP_MAP_QUEUE_VECTOR;
+		xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+	} else {
+		xn_params.vc_op = VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR;
+		xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+	}

	for (i = 0, k = 0; i < num_msgs; i++) {
		memset(vqvm, 0, buf_sz);
+		xn_params.send_buf.iov_base = vqvm;
+		xn_params.send_buf.iov_len = buf_sz;
		vqvm->vport_id = cpu_to_le32(vport->vport_id);
		vqvm->num_qv_maps = cpu_to_le16(num_chunks);
		memcpy(vqvm->qv_maps, &vqv[k], chunk_sz * num_chunks);

-		if (map) {
-			err = idpf_send_mb_msg(adapter,
-					       VIRTCHNL2_OP_MAP_QUEUE_VECTOR,
-					       buf_sz, (u8 *)vqvm);
-			if (!err)
-				err = idpf_wait_for_event(adapter, vport,
-							  IDPF_VC_MAP_IRQ,
-							  IDPF_VC_MAP_IRQ_ERR);
-		} else {
-			err = idpf_send_mb_msg(adapter,
-					       VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR,
-					       buf_sz, (u8 *)vqvm);
-			if (!err)
-				err = idpf_min_wait_for_event(adapter, vport,
-							      IDPF_VC_UNMAP_IRQ,
-							      IDPF_VC_UNMAP_IRQ_ERR);
-		}
-		if (err)
-			goto mbx_error;
+		reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+		if (reply_sz < 0)
+			return reply_sz;

		k += num_chunks;
		num_q -= num_chunks;
@@ -2044,13 +1941,7 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
		buf_sz = struct_size(vqvm, qv_maps, num_chunks);
	}

-mbx_error:
-	mutex_unlock(&vport->vc_buf_lock);
-	kfree(vqvm);
-error:
-	kfree(vqv);
-
-	return err;
+	return 0;
}
/**
@@ -2062,7 +1953,7 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
 */
int idpf_send_enable_queues_msg(struct idpf_vport *vport)
{
-	return idpf_send_ena_dis_queues_msg(vport, VIRTCHNL2_OP_ENABLE_QUEUES);
+	return idpf_send_ena_dis_queues_msg(vport, true);
}

/**
@@ -2076,7 +1967,7 @@ int idpf_send_disable_queues_msg(struct idpf_vport *vport)
{
	int err, i;

-	err = idpf_send_ena_dis_queues_msg(vport, VIRTCHNL2_OP_DISABLE_QUEUES);
+	err = idpf_send_ena_dis_queues_msg(vport, false);
	if (err)
		return err;
@@ -2122,22 +2013,21 @@ static void idpf_convert_reg_to_queue_chunks(struct virtchnl2_queue_chunk *dchun
 */
int idpf_send_delete_queues_msg(struct idpf_vport *vport)
{
-	struct idpf_adapter *adapter = vport->adapter;
+	struct virtchnl2_del_ena_dis_queues *eq __free(kfree) = NULL;
	struct virtchnl2_create_vport *vport_params;
	struct virtchnl2_queue_reg_chunks *chunks;
-	struct virtchnl2_del_ena_dis_queues *eq;
+	struct idpf_vc_xn_params xn_params = {};
	struct idpf_vport_config *vport_config;
	u16 vport_idx = vport->idx;
-	int buf_size, err;
+	ssize_t reply_sz;
	u16 num_chunks;
+	int buf_size;

-	vport_config = adapter->vport_config[vport_idx];
+	vport_config = vport->adapter->vport_config[vport_idx];
	if (vport_config->req_qs_chunks) {
-		struct virtchnl2_add_queues *vc_aq =
-			(struct virtchnl2_add_queues *)vport_config->req_qs_chunks;
-		chunks = &vc_aq->chunks;
+		chunks = &vport_config->req_qs_chunks->chunks;
	} else {
-		vport_params = adapter->vport_params_recvd[vport_idx];
+		vport_params = vport->adapter->vport_params_recvd[vport_idx];
		chunks = &vport_params->chunks;
	}

@@ -2154,21 +2044,13 @@ int idpf_send_delete_queues_msg(struct idpf_vport *vport)
	idpf_convert_reg_to_queue_chunks(eq->chunks.chunks, chunks->chunks,
					 num_chunks);

-	mutex_lock(&vport->vc_buf_lock);
-
-	err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DEL_QUEUES,
-			       buf_size, (u8 *)eq);
-	if (err)
-		goto rel_lock;
-
-	err = idpf_min_wait_for_event(adapter, vport, IDPF_VC_DEL_QUEUES,
-				      IDPF_VC_DEL_QUEUES_ERR);
-
-rel_lock:
-	mutex_unlock(&vport->vc_buf_lock);
-	kfree(eq);
-
-	return err;
+	xn_params.vc_op = VIRTCHNL2_OP_DEL_QUEUES;
+	xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+	xn_params.send_buf.iov_base = eq;
+	xn_params.send_buf.iov_len = buf_size;
+	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+
+	return reply_sz < 0 ? reply_sz : 0;
}
/**
@@ -2203,14 +2085,21 @@ int idpf_send_config_queues_msg(struct idpf_vport *vport)
int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
			     u16 num_complq, u16 num_rx_q, u16 num_rx_bufq)
{
-	struct idpf_adapter *adapter = vport->adapter;
+	struct virtchnl2_add_queues *vc_msg __free(kfree) = NULL;
+	struct idpf_vc_xn_params xn_params = {};
	struct idpf_vport_config *vport_config;
-	struct virtchnl2_add_queues aq = { };
-	struct virtchnl2_add_queues *vc_msg;
+	struct virtchnl2_add_queues aq = {};
	u16 vport_idx = vport->idx;
-	int size, err;
+	ssize_t reply_sz;
+	int size;

+	vc_msg = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
+	if (!vc_msg)
+		return -ENOMEM;

-	vport_config = adapter->vport_config[vport_idx];
+	vport_config = vport->adapter->vport_config[vport_idx];
+	kfree(vport_config->req_qs_chunks);
+	vport_config->req_qs_chunks = NULL;

	aq.vport_id = cpu_to_le32(vport->vport_id);
	aq.num_tx_q = cpu_to_le16(num_tx_q);
@@ -2218,47 +2107,33 @@ int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
	aq.num_rx_q = cpu_to_le16(num_rx_q);
	aq.num_rx_bufq = cpu_to_le16(num_rx_bufq);

-	mutex_lock(&((struct idpf_vport *)vport)->vc_buf_lock);
-
-	err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_ADD_QUEUES,
-			       sizeof(struct virtchnl2_add_queues), (u8 *)&aq);
-	if (err)
-		goto rel_lock;
-
-	/* We want vport to be const to prevent incidental code changes making
-	 * changes to the vport config. We're making a special exception here
-	 * to discard const to use the virtchnl.
-	 */
-	err = idpf_wait_for_event(adapter, (struct idpf_vport *)vport,
-				  IDPF_VC_ADD_QUEUES, IDPF_VC_ADD_QUEUES_ERR);
-	if (err)
-		goto rel_lock;
-
-	kfree(vport_config->req_qs_chunks);
-	vport_config->req_qs_chunks = NULL;
-
-	vc_msg = (struct virtchnl2_add_queues *)vport->vc_msg;
+	xn_params.vc_op = VIRTCHNL2_OP_ADD_QUEUES;
+	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+	xn_params.send_buf.iov_base = &aq;
+	xn_params.send_buf.iov_len = sizeof(aq);
+	xn_params.recv_buf.iov_base = vc_msg;
+	xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
+	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+	if (reply_sz < 0)
+		return reply_sz;

	/* compare vc_msg num queues with vport num queues */
	if (le16_to_cpu(vc_msg->num_tx_q) != num_tx_q ||
	    le16_to_cpu(vc_msg->num_rx_q) != num_rx_q ||
	    le16_to_cpu(vc_msg->num_tx_complq) != num_complq ||
-	    le16_to_cpu(vc_msg->num_rx_bufq) != num_rx_bufq) {
-		err = -EINVAL;
-		goto rel_lock;
-	}
+	    le16_to_cpu(vc_msg->num_rx_bufq) != num_rx_bufq)
+		return -EINVAL;

	size = struct_size(vc_msg, chunks.chunks,
			   le16_to_cpu(vc_msg->chunks.num_chunks));
-	vport_config->req_qs_chunks = kmemdup(vc_msg, size, GFP_KERNEL);
-	if (!vport_config->req_qs_chunks) {
-		err = -ENOMEM;
-		goto rel_lock;
-	}
+	if (reply_sz < size)
+		return -EIO;

-rel_lock:
-	mutex_unlock(&((struct idpf_vport *)vport)->vc_buf_lock);
+	vport_config->req_qs_chunks = kmemdup(vc_msg, size, GFP_KERNEL);
+	if (!vport_config->req_qs_chunks)
+		return -ENOMEM;

-	return err;
+	return 0;
}
/**
@@ -2270,53 +2145,49 @@ int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
 */
int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors)
{
-	struct virtchnl2_alloc_vectors *alloc_vec, *rcvd_vec;
-	struct virtchnl2_alloc_vectors ac = { };
+	struct virtchnl2_alloc_vectors *rcvd_vec __free(kfree) = NULL;
+	struct idpf_vc_xn_params xn_params = {};
+	struct virtchnl2_alloc_vectors ac = {};
+	ssize_t reply_sz;
	u16 num_vchunks;
-	int size, err;
+	int size;

	ac.num_vectors = cpu_to_le16(num_vectors);

-	mutex_lock(&adapter->vc_buf_lock);
-
-	err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_ALLOC_VECTORS,
-			       sizeof(ac), (u8 *)&ac);
-	if (err)
-		goto rel_lock;
-
-	err = idpf_wait_for_event(adapter, NULL, IDPF_VC_ALLOC_VECTORS,
-				  IDPF_VC_ALLOC_VECTORS_ERR);
-	if (err)
-		goto rel_lock;
+	rcvd_vec = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
+	if (!rcvd_vec)
+		return -ENOMEM;
+
+	xn_params.vc_op = VIRTCHNL2_OP_ALLOC_VECTORS;
+	xn_params.send_buf.iov_base = &ac;
+	xn_params.send_buf.iov_len = sizeof(ac);
+	xn_params.recv_buf.iov_base = rcvd_vec;
+	xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
+	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+	if (reply_sz < 0)
+		return reply_sz;

-	rcvd_vec = (struct virtchnl2_alloc_vectors *)adapter->vc_msg;
	num_vchunks = le16_to_cpu(rcvd_vec->vchunks.num_vchunks);
	size = struct_size(rcvd_vec, vchunks.vchunks, num_vchunks);
-	if (size > sizeof(adapter->vc_msg)) {
-		err = -EINVAL;
-		goto rel_lock;
-	}
+	if (reply_sz < size)
+		return -EIO;
+
+	if (size > IDPF_CTLQ_MAX_BUF_LEN)
+		return -EINVAL;

	kfree(adapter->req_vec_chunks);
-	adapter->req_vec_chunks = NULL;
-	adapter->req_vec_chunks = kmemdup(adapter->vc_msg, size, GFP_KERNEL);
-	if (!adapter->req_vec_chunks) {
-		err = -ENOMEM;
-		goto rel_lock;
-	}
+	adapter->req_vec_chunks = kmemdup(rcvd_vec, size, GFP_KERNEL);
+	if (!adapter->req_vec_chunks)
+		return -ENOMEM;

-	alloc_vec = adapter->req_vec_chunks;
-	if (le16_to_cpu(alloc_vec->num_vectors) < num_vectors) {
+	if (le16_to_cpu(adapter->req_vec_chunks->num_vectors) < num_vectors) {
		kfree(adapter->req_vec_chunks);
		adapter->req_vec_chunks = NULL;
-		err = -EINVAL;
+		return -EINVAL;
	}

-rel_lock:
-	mutex_unlock(&adapter->vc_buf_lock);
-
-	return err;
+	return 0;
}
/**
@@ -2329,29 +2200,24 @@ int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter)
{
	struct virtchnl2_alloc_vectors *ac = adapter->req_vec_chunks;
	struct virtchnl2_vector_chunks *vcs = &ac->vchunks;
-	int buf_size, err;
+	struct idpf_vc_xn_params xn_params = {};
+	ssize_t reply_sz;
+	int buf_size;

	buf_size = struct_size(vcs, vchunks, le16_to_cpu(vcs->num_vchunks));

-	mutex_lock(&adapter->vc_buf_lock);
-
-	err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DEALLOC_VECTORS, buf_size,
-			       (u8 *)vcs);
-	if (err)
-		goto rel_lock;
-
-	err = idpf_min_wait_for_event(adapter, NULL, IDPF_VC_DEALLOC_VECTORS,
-				      IDPF_VC_DEALLOC_VECTORS_ERR);
-	if (err)
-		goto rel_lock;
+	xn_params.vc_op = VIRTCHNL2_OP_DEALLOC_VECTORS;
+	xn_params.send_buf.iov_base = vcs;
+	xn_params.send_buf.iov_len = buf_size;
+	xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+	if (reply_sz < 0)
+		return reply_sz;

	kfree(adapter->req_vec_chunks);
	adapter->req_vec_chunks = NULL;

-rel_lock:
-	mutex_unlock(&adapter->vc_buf_lock);
-
-	return err;
+	return 0;
}
/**
@@ -2374,25 +2240,18 @@ static int idpf_get_max_vfs(struct idpf_adapter *adapter)
 */
int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs)
{
-	struct virtchnl2_sriov_vfs_info svi = { };
-	int err;
+	struct virtchnl2_sriov_vfs_info svi = {};
+	struct idpf_vc_xn_params xn_params = {};
+	ssize_t reply_sz;

	svi.num_vfs = cpu_to_le16(num_vfs);
+	xn_params.vc_op = VIRTCHNL2_OP_SET_SRIOV_VFS;
+	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+	xn_params.send_buf.iov_base = &svi;
+	xn_params.send_buf.iov_len = sizeof(svi);
+	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);

-	mutex_lock(&adapter->vc_buf_lock);
-
-	err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_SET_SRIOV_VFS,
-			       sizeof(svi), (u8 *)&svi);
-	if (err)
-		goto rel_lock;
-
-	err = idpf_wait_for_event(adapter, NULL, IDPF_VC_SET_SRIOV_VFS,
-				  IDPF_VC_SET_SRIOV_VFS_ERR);
-
-rel_lock:
-	mutex_unlock(&adapter->vc_buf_lock);
-
-	return err;
+	return reply_sz < 0 ? reply_sz : 0;
}
/**
@@ -2405,10 +2264,10 @@ int idpf_send_get_stats_msg(struct idpf_vport *vport)
{
	struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
	struct rtnl_link_stats64 *netstats = &np->netstats;
-	struct idpf_adapter *adapter = vport->adapter;
-	struct virtchnl2_vport_stats stats_msg = { };
-	struct virtchnl2_vport_stats *stats;
-	int err;
+	struct virtchnl2_vport_stats stats_msg = {};
+	struct idpf_vc_xn_params xn_params = {};
+	ssize_t reply_sz;

	/* Don't send get_stats message if the link is down */
	if (np->state <= __IDPF_VPORT_DOWN)
@@ -2416,46 +2275,38 @@ int idpf_send_get_stats_msg(struct idpf_vport *vport)

	stats_msg.vport_id = cpu_to_le32(vport->vport_id);

-	mutex_lock(&vport->vc_buf_lock);
-
-	err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_STATS,
-			       sizeof(struct virtchnl2_vport_stats),
-			       (u8 *)&stats_msg);
-	if (err)
-		goto rel_lock;
-
-	err = idpf_wait_for_event(adapter, vport, IDPF_VC_GET_STATS,
-				  IDPF_VC_GET_STATS_ERR);
-	if (err)
-		goto rel_lock;
+	xn_params.vc_op = VIRTCHNL2_OP_GET_STATS;
+	xn_params.send_buf.iov_base = &stats_msg;
+	xn_params.send_buf.iov_len = sizeof(stats_msg);
+	xn_params.recv_buf = xn_params.send_buf;
+	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;

-	stats = (struct virtchnl2_vport_stats *)vport->vc_msg;
+	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+	if (reply_sz < 0)
+		return reply_sz;
+	if (reply_sz < sizeof(stats_msg))
+		return -EIO;

	spin_lock_bh(&np->stats_lock);

-	netstats->rx_packets = le64_to_cpu(stats->rx_unicast) +
-			       le64_to_cpu(stats->rx_multicast) +
-			       le64_to_cpu(stats->rx_broadcast);
-	netstats->rx_bytes = le64_to_cpu(stats->rx_bytes);
-	netstats->rx_dropped = le64_to_cpu(stats->rx_discards);
-	netstats->rx_over_errors = le64_to_cpu(stats->rx_overflow_drop);
-	netstats->rx_length_errors = le64_to_cpu(stats->rx_invalid_frame_length);
-
-	netstats->tx_packets = le64_to_cpu(stats->tx_unicast) +
-			       le64_to_cpu(stats->tx_multicast) +
-			       le64_to_cpu(stats->tx_broadcast);
-	netstats->tx_bytes = le64_to_cpu(stats->tx_bytes);
-	netstats->tx_errors = le64_to_cpu(stats->tx_errors);
-	netstats->tx_dropped = le64_to_cpu(stats->tx_discards);
-
-	vport->port_stats.vport_stats = *stats;
+	netstats->rx_packets = le64_to_cpu(stats_msg.rx_unicast) +
+			       le64_to_cpu(stats_msg.rx_multicast) +
+			       le64_to_cpu(stats_msg.rx_broadcast);
+	netstats->tx_packets = le64_to_cpu(stats_msg.tx_unicast) +
+			       le64_to_cpu(stats_msg.tx_multicast) +
+			       le64_to_cpu(stats_msg.tx_broadcast);
+	netstats->rx_bytes = le64_to_cpu(stats_msg.rx_bytes);
+	netstats->tx_bytes = le64_to_cpu(stats_msg.tx_bytes);
+	netstats->rx_errors = le64_to_cpu(stats_msg.rx_errors);
+	netstats->tx_errors = le64_to_cpu(stats_msg.tx_errors);
+	netstats->rx_dropped = le64_to_cpu(stats_msg.rx_discards);
+	netstats->tx_dropped = le64_to_cpu(stats_msg.tx_discards);
+
+	vport->port_stats.vport_stats = stats_msg;

	spin_unlock_bh(&np->stats_lock);

-rel_lock:
-	mutex_unlock(&vport->vc_buf_lock);
-
-	return err;
+	return 0;
}
/**
@@ -2467,70 +2318,70 @@ int idpf_send_get_stats_msg(struct idpf_vport *vport)
 */
int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get)
{
-	struct idpf_adapter *adapter = vport->adapter;
-	struct virtchnl2_rss_lut *recv_rl;
+	struct virtchnl2_rss_lut *recv_rl __free(kfree) = NULL;
+	struct virtchnl2_rss_lut *rl __free(kfree) = NULL;
+	struct idpf_vc_xn_params xn_params = {};
	struct idpf_rss_data *rss_data;
-	struct virtchnl2_rss_lut *rl;
	int buf_size, lut_buf_size;
-	int i, err;
+	ssize_t reply_sz;
+	int i;

-	rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
+	rss_data =
+		&vport->adapter->vport_config[vport->idx]->user_config.rss_data;
	buf_size = struct_size(rl, lut, rss_data->rss_lut_size);
	rl = kzalloc(buf_size, GFP_KERNEL);
	if (!rl)
		return -ENOMEM;

	rl->vport_id = cpu_to_le32(vport->vport_id);

-	mutex_lock(&vport->vc_buf_lock);
-
-	if (!get) {
+	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+	xn_params.send_buf.iov_base = rl;
+	xn_params.send_buf.iov_len = buf_size;
+
+	if (get) {
+		recv_rl = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
+		if (!recv_rl)
+			return -ENOMEM;
+		xn_params.vc_op = VIRTCHNL2_OP_GET_RSS_LUT;
+		xn_params.recv_buf.iov_base = recv_rl;
+		xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
+	} else {
		rl->lut_entries = cpu_to_le16(rss_data->rss_lut_size);
		for (i = 0; i < rss_data->rss_lut_size; i++)
			rl->lut[i] = cpu_to_le32(rss_data->rss_lut[i]);

-		err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_SET_RSS_LUT,
-				       buf_size, (u8 *)rl);
-		if (err)
-			goto free_mem;
-
-		err = idpf_wait_for_event(adapter, vport, IDPF_VC_SET_RSS_LUT,
-					  IDPF_VC_SET_RSS_LUT_ERR);
-
-		goto free_mem;
+		xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_LUT;
	}
+	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+	if (reply_sz < 0)
+		return reply_sz;
+	if (!get)
+		return 0;
+	if (reply_sz < sizeof(struct virtchnl2_rss_lut))
+		return -EIO;

-	err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_RSS_LUT,
-			       buf_size, (u8 *)rl);
-	if (err)
-		goto free_mem;
-
-	err = idpf_wait_for_event(adapter, vport, IDPF_VC_GET_RSS_LUT,
-				  IDPF_VC_GET_RSS_LUT_ERR);
-	if (err)
-		goto free_mem;
+	lut_buf_size = le16_to_cpu(recv_rl->lut_entries) * sizeof(u32);
+	if (reply_sz < lut_buf_size)
+		return -EIO;

-	recv_rl = (struct virtchnl2_rss_lut *)vport->vc_msg;
+	/* size didn't change, we can reuse existing lut buf */
	if (rss_data->rss_lut_size == le16_to_cpu(recv_rl->lut_entries))
		goto do_memcpy;

	rss_data->rss_lut_size = le16_to_cpu(recv_rl->lut_entries);
	kfree(rss_data->rss_lut);

-	lut_buf_size = rss_data->rss_lut_size * sizeof(u32);
	rss_data->rss_lut = kzalloc(lut_buf_size, GFP_KERNEL);
	if (!rss_data->rss_lut) {
		rss_data->rss_lut_size = 0;
-		err = -ENOMEM;
-		goto free_mem;
+		return -ENOMEM;
	}

do_memcpy:
-	memcpy(rss_data->rss_lut, vport->vc_msg, rss_data->rss_lut_size);
-free_mem:
-	mutex_unlock(&vport->vc_buf_lock);
-	kfree(rl);
+	memcpy(rss_data->rss_lut, recv_rl->lut, rss_data->rss_lut_size);

-	return err;
+	return 0;
}
/** /**
...@@ -2542,68 +2393,70 @@ int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get) ...@@ -2542,68 +2393,70 @@ int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get)
*/ */
int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get)
{
	struct virtchnl2_rss_key *recv_rk __free(kfree) = NULL;
	struct virtchnl2_rss_key *rk __free(kfree) = NULL;
	struct idpf_vc_xn_params xn_params = {};
	struct idpf_rss_data *rss_data;
	ssize_t reply_sz;
	int i, buf_size;
	u16 key_size;

	rss_data =
		&vport->adapter->vport_config[vport->idx]->user_config.rss_data;
	buf_size = struct_size(rk, key_flex, rss_data->rss_key_size);
	rk = kzalloc(buf_size, GFP_KERNEL);
	if (!rk)
		return -ENOMEM;

	rk->vport_id = cpu_to_le32(vport->vport_id);
	xn_params.send_buf.iov_base = rk;
	xn_params.send_buf.iov_len = buf_size;
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
	if (get) {
		recv_rk = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
		if (!recv_rk)
			return -ENOMEM;

		xn_params.vc_op = VIRTCHNL2_OP_GET_RSS_KEY;
		xn_params.recv_buf.iov_base = recv_rk;
		xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
	} else {
		rk->key_len = cpu_to_le16(rss_data->rss_key_size);
		for (i = 0; i < rss_data->rss_key_size; i++)
			rk->key_flex[i] = rss_data->rss_key[i];

		xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_KEY;
	}

	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
	if (reply_sz < 0)
		return reply_sz;
	if (!get)
		return 0;
	if (reply_sz < sizeof(struct virtchnl2_rss_key))
		return -EIO;

	key_size = min_t(u16, NETDEV_RSS_KEY_LEN,
			 le16_to_cpu(recv_rk->key_len));
	if (reply_sz < key_size)
		return -EIO;

	/* key len didn't change, reuse existing buf */
	if (rss_data->rss_key_size == key_size)
		goto do_memcpy;

	rss_data->rss_key_size = key_size;
	kfree(rss_data->rss_key);
	rss_data->rss_key = kzalloc(key_size, GFP_KERNEL);
	if (!rss_data->rss_key) {
		rss_data->rss_key_size = 0;
		return -ENOMEM;
	}

do_memcpy:
	memcpy(rss_data->rss_key, recv_rk->key_flex, rss_data->rss_key_size);

	return 0;
}
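The early returns above leak nothing because rk and recv_rk are declared with __free(kfree), the scope-based cleanup helper from <linux/cleanup.h>. A minimal standalone sketch of the pattern (example_scoped_alloc and its buffer are illustrative, not driver code):

#include <linux/cleanup.h>
#include <linux/slab.h>

static int example_scoped_alloc(size_t len)
{
	u8 *buf __free(kfree) = kzalloc(len, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;		/* nothing allocated, nothing to free */

	if (len < 16)
		return -EINVAL;		/* buf is kfree()d automatically here */

	/* ... use buf ... */

	return 0;			/* ... and here, on scope exit */
}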
/**
@@ -2655,13 +2508,15 @@ static void idpf_fill_ptype_lookup(struct idpf_rx_ptype_decoded *ptype,
 */
int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
{
	struct virtchnl2_get_ptype_info *get_ptype_info __free(kfree) = NULL;
	struct virtchnl2_get_ptype_info *ptype_info __free(kfree) = NULL;
	struct idpf_rx_ptype_decoded *ptype_lkup = vport->rx_ptype_lkup;
	int max_ptype, ptypes_recvd = 0, ptype_offset;
	struct idpf_adapter *adapter = vport->adapter;
	struct idpf_vc_xn_params xn_params = {};
	u16 next_ptype_id = 0;
	ssize_t reply_sz;
	int i, j, k;

	if (idpf_is_queue_model_split(vport->rxq_model))
		max_ptype = IDPF_RX_MAX_PTYPE;
@@ -2670,43 +2525,44 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
	memset(vport->rx_ptype_lkup, 0, sizeof(vport->rx_ptype_lkup));

	get_ptype_info = kzalloc(sizeof(*get_ptype_info), GFP_KERNEL);
	if (!get_ptype_info)
		return -ENOMEM;

	ptype_info = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
	if (!ptype_info)
		return -ENOMEM;

	xn_params.vc_op = VIRTCHNL2_OP_GET_PTYPE_INFO;
	xn_params.send_buf.iov_base = get_ptype_info;
	xn_params.send_buf.iov_len = sizeof(*get_ptype_info);
	xn_params.recv_buf.iov_base = ptype_info;
	xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;

	while (next_ptype_id < max_ptype) {
		get_ptype_info->start_ptype_id = cpu_to_le16(next_ptype_id);

		if ((next_ptype_id + IDPF_RX_MAX_PTYPES_PER_BUF) > max_ptype)
			get_ptype_info->num_ptypes =
				cpu_to_le16(max_ptype - next_ptype_id);
		else
			get_ptype_info->num_ptypes =
				cpu_to_le16(IDPF_RX_MAX_PTYPES_PER_BUF);

		reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
		if (reply_sz < 0)
			return reply_sz;
		if (reply_sz < IDPF_CTLQ_MAX_BUF_LEN)
			return -EIO;

		ptypes_recvd += le16_to_cpu(ptype_info->num_ptypes);
		if (ptypes_recvd > max_ptype)
			return -EINVAL;

		next_ptype_id = le16_to_cpu(get_ptype_info->start_ptype_id) +
				le16_to_cpu(get_ptype_info->num_ptypes);

		ptype_offset = IDPF_RX_PTYPE_HDR_SZ;

@@ -2719,17 +2575,13 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
				((u8 *)ptype_info + ptype_offset);

			ptype_offset += IDPF_GET_PTYPE_SIZE(ptype);
			if (ptype_offset > IDPF_CTLQ_MAX_BUF_LEN)
				return -EINVAL;

			/* 0xFFFF indicates end of ptypes */
			if (le16_to_cpu(ptype->ptype_id_10) ==
							IDPF_INVALID_PTYPE_ID)
				return 0;

			if (idpf_is_queue_model_split(vport->rxq_model))
				k = le16_to_cpu(ptype->ptype_id_10);
@@ -2857,11 +2709,7 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
		}
	}

	return 0;
}
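The inner loop above walks variable-length ptype records packed back to back in one reply buffer, advancing a byte offset by each record's own size and bounds-checking before every step. A self-contained sketch of that walk (struct example_rec and example_walk_records are invented for illustration):

#include <linux/overflow.h>
#include <linux/types.h>

struct example_rec {
	__le16 id;
	u8 len;			/* bytes used in data[] */
	u8 data[];
};

static int example_walk_records(const u8 *buf, size_t buf_len)
{
	size_t off = 0;

	while (off + sizeof(struct example_rec) <= buf_len) {
		const struct example_rec *rec =
			(const struct example_rec *)(buf + off);

		/* advance by this record's own size, then bounds-check
		 * before touching the next record
		 */
		off += struct_size(rec, data, rec->len);
		if (off > buf_len)
			return -EINVAL;

		/* ... decode rec ... */
	}

	return 0;
}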
/**
@@ -2873,27 +2721,20 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
 */
int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport)
{
	struct idpf_vc_xn_params xn_params = {};
	struct virtchnl2_loopback loopback;
	ssize_t reply_sz;

	loopback.vport_id = cpu_to_le32(vport->vport_id);
	loopback.enable = idpf_is_feature_ena(vport, NETIF_F_LOOPBACK);

	xn_params.vc_op = VIRTCHNL2_OP_LOOPBACK;
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
	xn_params.send_buf.iov_base = &loopback;
	xn_params.send_buf.iov_len = sizeof(loopback);
	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);

	return reply_sz < 0 ? reply_sz : 0;
}
/**
@@ -2958,7 +2799,7 @@ int idpf_init_dflt_mbx(struct idpf_adapter *adapter)
		return -ENOENT;
	}

	adapter->state = __IDPF_VER_CHECK;

	return 0;
}
@@ -3055,35 +2896,42 @@ int idpf_vc_core_init(struct idpf_adapter *adapter)
	u16 num_max_vports;
	int err = 0;

	if (!adapter->vcxn_mngr) {
		adapter->vcxn_mngr = kzalloc(sizeof(*adapter->vcxn_mngr), GFP_KERNEL);
		if (!adapter->vcxn_mngr) {
			err = -ENOMEM;
			goto init_failed;
		}
	}
	idpf_vc_xn_init(adapter->vcxn_mngr);

	while (adapter->state != __IDPF_INIT_SW) {
		switch (adapter->state) {
		case __IDPF_VER_CHECK:
			err = idpf_send_ver_msg(adapter);
			switch (err) {
			case 0:
				/* success, move state machine forward */
				adapter->state = __IDPF_GET_CAPS;
				fallthrough;
			case -EAGAIN:
				goto restart;
			default:
				/* Something bad happened, try again but only a
				 * few times.
				 */
				goto init_failed;
			}
		case __IDPF_GET_CAPS:
			err = idpf_send_get_caps_msg(adapter);
			if (err)
				goto init_failed;
			adapter->state = __IDPF_INIT_SW;
			break;
		default:
			dev_err(&adapter->pdev->dev, "Device is in bad state: %d\n",
				adapter->state);
			err = -EINVAL;
			goto init_failed;
		}
		break;
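Note the inner switch on the return code: a successful send advances the state and then deliberately falls through into the -EAGAIN case, so both outcomes restart the loop while anything else bails out. A tiny sketch of the idiom (example_step is hypothetical):

static int example_step(int err, int *state)
{
	switch (err) {
	case 0:
		(*state)++;		/* success, move state machine forward */
		fallthrough;
	case -EAGAIN:
		return 1;		/* transient: caller restarts the loop */
	default:
		return err;		/* fatal: caller gives up */
	}
}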
@@ -3142,7 +2990,9 @@ int idpf_vc_core_init(struct idpf_adapter *adapter)
	queue_delayed_work(adapter->init_wq, &adapter->init_task,
			   msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07)));

	set_bit(IDPF_VC_CORE_INIT, adapter->flags);

	return 0;

err_intr_req:
	cancel_delayed_work_sync(&adapter->serv_task);
@@ -3151,7 +3001,6 @@ int idpf_vc_core_init(struct idpf_adapter *adapter)
err_netdev_alloc:
	kfree(adapter->vports);
	adapter->vports = NULL;

	return err;
init_failed:
@@ -3168,7 +3017,9 @@ int idpf_vc_core_init(struct idpf_adapter *adapter)
	 * register writes might not have taken effect. Retry to initialize
	 * the mailbox again
	 */
	adapter->state = __IDPF_VER_CHECK;
	if (adapter->vcxn_mngr)
		idpf_vc_xn_shutdown(adapter->vcxn_mngr);
	idpf_deinit_dflt_mbx(adapter);
	set_bit(IDPF_HR_DRV_LOAD, adapter->flags);
	queue_delayed_work(adapter->vc_event_wq, &adapter->vc_event_task,
@@ -3184,29 +3035,22 @@ int idpf_vc_core_init(struct idpf_adapter *adapter)
 */
void idpf_vc_core_deinit(struct idpf_adapter *adapter)
{
	if (!test_bit(IDPF_VC_CORE_INIT, adapter->flags))
		return;

	idpf_vc_xn_shutdown(adapter->vcxn_mngr);
	idpf_deinit_task(adapter);
	idpf_intr_rel(adapter);

	cancel_delayed_work_sync(&adapter->serv_task);
	cancel_delayed_work_sync(&adapter->mbx_task);

	idpf_vport_params_buf_rel(adapter);

	kfree(adapter->vports);
	adapter->vports = NULL;

	clear_bit(IDPF_VC_CORE_INIT, adapter->flags);
}
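Deinit is now guarded by the IDPF_VC_CORE_INIT flag that init sets on success, making teardown a no-op after a failed or repeated init. A minimal sketch of the guard (the example_adapter type and EXAMPLE_* names are illustrative):

#include <linux/bitmap.h>
#include <linux/bitops.h>

enum { EXAMPLE_CORE_INIT, EXAMPLE_FLAGS_NBITS };

struct example_adapter {
	DECLARE_BITMAP(flags, EXAMPLE_FLAGS_NBITS);
};

static void example_core_deinit(struct example_adapter *ad)
{
	/* bail unless init completed; mirrors the test_bit() guard above */
	if (!test_bit(EXAMPLE_CORE_INIT, ad->flags))
		return;

	/* ... release resources in reverse order of init ... */

	clear_bit(EXAMPLE_CORE_INIT, ad->flags);
}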
/**
@@ -3621,6 +3465,75 @@ u32 idpf_get_vport_id(struct idpf_vport *vport)
	return le32_to_cpu(vport_msg->vport_id);
}
/**
 * idpf_mac_filter_async_handler - Async callback for mac filters
 * @adapter: private data struct
 * @xn: transaction for message
 * @ctlq_msg: received message
 *
 * In some scenarios the driver can't sleep and wait for a reply (e.g. the
 * stack is holding rtnl_lock) when adding a new MAC filter, which makes it
 * hard to act on errors returned in the reply. The best we can ultimately do
 * is remove the filter from our list of MAC filters and report the error.
 *
 * Return: 0 if the error was handled, -EINVAL on an invalid or truncated
 * payload.
 */
static int idpf_mac_filter_async_handler(struct idpf_adapter *adapter,
					 struct idpf_vc_xn *xn,
					 const struct idpf_ctlq_msg *ctlq_msg)
{
	struct virtchnl2_mac_addr_list *ma_list;
	struct idpf_vport_config *vport_config;
	struct virtchnl2_mac_addr *mac_addr;
	struct idpf_mac_filter *f, *tmp;
	struct list_head *ma_list_head;
	struct idpf_vport *vport;
	u16 num_entries;
	int i;

	/* if success we're done, we're only here if something bad happened */
	if (!ctlq_msg->cookie.mbx.chnl_retval)
		return 0;

	/* make sure at least struct is there */
	if (xn->reply_sz < sizeof(*ma_list))
		goto invalid_payload;

	ma_list = ctlq_msg->ctx.indirect.payload->va;
	mac_addr = ma_list->mac_addr_list;
	num_entries = le16_to_cpu(ma_list->num_mac_addr);
	/* we should have received a buffer at least this big */
	if (xn->reply_sz < struct_size(ma_list, mac_addr_list, num_entries))
		goto invalid_payload;

	vport = idpf_vid_to_vport(adapter, le32_to_cpu(ma_list->vport_id));
	if (!vport)
		goto invalid_payload;

	vport_config = adapter->vport_config[le32_to_cpu(ma_list->vport_id)];
	ma_list_head = &vport_config->user_config.mac_filter_list;

	/* We can't do much to reconcile bad filters at this point, however we
	 * should at least remove them from our list one way or the other so we
	 * have some idea what good filters we have.
	 */
	spin_lock_bh(&vport_config->mac_filter_list_lock);
	list_for_each_entry_safe(f, tmp, ma_list_head, list)
		for (i = 0; i < num_entries; i++)
			if (ether_addr_equal(mac_addr[i].addr, f->macaddr))
				list_del(&f->list);
	spin_unlock_bh(&vport_config->mac_filter_list_lock);
	dev_err_ratelimited(&adapter->pdev->dev, "Received error sending MAC filter request (op %d)\n",
			    xn->vc_op);

	return 0;

invalid_payload:
	dev_err_ratelimited(&adapter->pdev->dev, "Received invalid MAC filter payload (op %d) (len %zd)\n",
			    xn->vc_op, xn->reply_sz);

	return -EINVAL;
}
/**
 * idpf_add_del_mac_filters - Add/del mac filters
 * @vport: Virtual port data structure
@@ -3634,17 +3547,21 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
			     struct idpf_netdev_priv *np,
			     bool add, bool async)
{
	struct virtchnl2_mac_addr_list *ma_list __free(kfree) = NULL;
	struct virtchnl2_mac_addr *mac_addr __free(kfree) = NULL;
	struct idpf_adapter *adapter = np->adapter;
	struct idpf_vc_xn_params xn_params = {};
	struct idpf_vport_config *vport_config;
	u32 num_msgs, total_filters = 0;
	struct idpf_mac_filter *f;
	ssize_t reply_sz;
	int i = 0, k;

	xn_params.vc_op = add ? VIRTCHNL2_OP_ADD_MAC_ADDR :
				VIRTCHNL2_OP_DEL_MAC_ADDR;
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
	xn_params.async = async;
	xn_params.async_handler = idpf_mac_filter_async_handler;

	vport_config = adapter->vport_config[np->vport_idx];
	spin_lock_bh(&vport_config->mac_filter_list_lock);

@@ -3668,13 +3585,13 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
	mac_addr = kcalloc(total_filters, sizeof(struct virtchnl2_mac_addr),
			   GFP_ATOMIC);
	if (!mac_addr) {
		spin_unlock_bh(&vport_config->mac_filter_list_lock);

		return -ENOMEM;
	}

	list_for_each_entry(f, &vport_config->user_config.mac_filter_list,
			    list) {
		if (add && f->add) {
			ether_addr_copy(mac_addr[i].addr, f->macaddr);
			i++;
@@ -3693,26 +3610,11 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
	spin_unlock_bh(&vport_config->mac_filter_list_lock);

	/* Chunk up the filters into multiple messages to avoid
	 * sending a control queue message buffer that is too large
	 */
	num_msgs = DIV_ROUND_UP(total_filters, IDPF_NUM_FILTERS_PER_MSG);

	for (i = 0, k = 0; i < num_msgs; i++) {
		u32 entries_size, buf_size, num_entries;

@@ -3724,10 +3626,8 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
		if (!ma_list || num_entries != IDPF_NUM_FILTERS_PER_MSG) {
			kfree(ma_list);
			ma_list = kzalloc(buf_size, GFP_ATOMIC);
			if (!ma_list)
				return -ENOMEM;
		} else {
			memset(ma_list, 0, buf_size);
		}

@@ -3736,34 +3636,17 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
		ma_list->num_mac_addr = cpu_to_le16(num_entries);
		memcpy(ma_list->mac_addr_list, &mac_addr[k], entries_size);

		xn_params.send_buf.iov_base = ma_list;
		xn_params.send_buf.iov_len = buf_size;
		reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
		if (reply_sz < 0)
			return reply_sz;

		k += num_entries;
		total_filters -= num_entries;
	}

	return 0;
}
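For the chunking arithmetic: DIV_ROUND_UP() gives the number of fixed-capacity messages, and the last message carries the remainder; for example, 7 filters at 3 per message yields 3 messages of 3, 3 and 1 entries. A hypothetical sketch (EXAMPLE_PER_MSG stands in for IDPF_NUM_FILTERS_PER_MSG):

#include <linux/kernel.h>
#include <linux/minmax.h>

#define EXAMPLE_PER_MSG 3

static void example_chunk(u32 total)
{
	u32 num_msgs = DIV_ROUND_UP(total, EXAMPLE_PER_MSG);
	u32 i, k;

	for (i = 0, k = 0; i < num_msgs; i++) {
		/* every message is full except possibly the last one */
		u32 num_entries = min_t(u32, total, EXAMPLE_PER_MSG);

		/* ... build and send a message with num_entries starting at k ... */
		k += num_entries;
		total -= num_entries;
	}
}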
/**
@@ -3780,9 +3663,10 @@ int idpf_set_promiscuous(struct idpf_adapter *adapter,
			 struct idpf_vport_user_config_data *config_data,
			 u32 vport_id)
{
	struct idpf_vc_xn_params xn_params = {};
	struct virtchnl2_promisc_info vpi;
	ssize_t reply_sz;
	u16 flags = 0;

	if (test_bit(__IDPF_PROMISC_UC, config_data->user_flags))
		flags |= VIRTCHNL2_UNICAST_PROMISC;
@@ -3792,9 +3676,13 @@ int idpf_set_promiscuous(struct idpf_adapter *adapter,
	vpi.vport_id = cpu_to_le32(vport_id);
	vpi.flags = cpu_to_le16(flags);

	xn_params.vc_op = VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE;
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
	xn_params.send_buf.iov_base = &vpi;
	xn_params.send_buf.iov_len = sizeof(vpi);
	/* setting promiscuous is only ever done asynchronously */
	xn_params.async = true;
	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);

	return reply_sz < 0 ? reply_sz : 0;
}
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (C) 2024 Intel Corporation */
#ifndef _IDPF_VIRTCHNL_H_
#define _IDPF_VIRTCHNL_H_
struct idpf_adapter;
struct idpf_netdev_priv;
struct idpf_vec_regs;
struct idpf_vport;
struct idpf_vport_max_q;
struct idpf_vport_user_config_data;
int idpf_init_dflt_mbx(struct idpf_adapter *adapter);
void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter);
int idpf_vc_core_init(struct idpf_adapter *adapter);
void idpf_vc_core_deinit(struct idpf_adapter *adapter);
int idpf_get_reg_intr_vecs(struct idpf_vport *vport,
struct idpf_vec_regs *reg_vals);
int idpf_queue_reg_init(struct idpf_vport *vport);
int idpf_vport_queue_ids_init(struct idpf_vport *vport);
int idpf_recv_mb_msg(struct idpf_adapter *adapter);
int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
u16 msg_size, u8 *msg, u16 cookie);
void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q);
u32 idpf_get_vport_id(struct idpf_vport *vport);
int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
struct idpf_vport_max_q *max_q);
int idpf_send_destroy_vport_msg(struct idpf_vport *vport);
int idpf_send_enable_vport_msg(struct idpf_vport *vport);
int idpf_send_disable_vport_msg(struct idpf_vport *vport);
int idpf_vport_adjust_qs(struct idpf_vport *vport);
int idpf_vport_alloc_max_qs(struct idpf_adapter *adapter,
struct idpf_vport_max_q *max_q);
void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter,
struct idpf_vport_max_q *max_q);
int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
u16 num_complq, u16 num_rx_q, u16 num_rx_bufq);
int idpf_send_delete_queues_msg(struct idpf_vport *vport);
int idpf_send_enable_queues_msg(struct idpf_vport *vport);
int idpf_send_disable_queues_msg(struct idpf_vport *vport);
int idpf_send_config_queues_msg(struct idpf_vport *vport);
int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport);
int idpf_get_vec_ids(struct idpf_adapter *adapter,
u16 *vecids, int num_vecids,
struct virtchnl2_vector_chunks *chunks);
int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors);
int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter);
int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map);
int idpf_add_del_mac_filters(struct idpf_vport *vport,
struct idpf_netdev_priv *np,
bool add, bool async);
int idpf_set_promiscuous(struct idpf_adapter *adapter,
struct idpf_vport_user_config_data *config_data,
u32 vport_id);
int idpf_check_supported_desc_ids(struct idpf_vport *vport);
int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport);
int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport);
int idpf_send_get_stats_msg(struct idpf_vport *vport);
int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs);
int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get);
int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get);
#endif /* _IDPF_VIRTCHNL_H_ */