Commit d358aa9a authored by Greg Rose, committed by Jeff Kirsher

i40evf: init code and hardware support

This patch implements the hardware-specific init and management.
Signed-off-by: Mitch Williams <mitch.a.williams@intel.com>
Signed-off-by: Greg Rose <gregory.v.rose@intel.com>
Tested-by: Sibai Li <sibai.li@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 5321a21c
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
* Copyright(c) 2013 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
******************************************************************************/
#include "i40e_status.h"
#include "i40e_type.h"
#include "i40e_register.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"
/**
* i40e_adminq_init_regs - Initialize AdminQ registers
* @hw: pointer to the hardware structure
*
* This assumes the alloc_asq and alloc_arq functions have already been called
**/
static void i40e_adminq_init_regs(struct i40e_hw *hw)
{
/* set head and tail registers in our local struct */
if (hw->mac.type == I40E_MAC_VF) {
hw->aq.asq.tail = I40E_VF_ATQT1;
hw->aq.asq.head = I40E_VF_ATQH1;
hw->aq.asq.len = I40E_VF_ATQLEN1;
hw->aq.arq.tail = I40E_VF_ARQT1;
hw->aq.arq.head = I40E_VF_ARQH1;
hw->aq.arq.len = I40E_VF_ARQLEN1;
} else {
hw->aq.asq.tail = I40E_PF_ATQT;
hw->aq.asq.head = I40E_PF_ATQH;
hw->aq.asq.len = I40E_PF_ATQLEN;
hw->aq.arq.tail = I40E_PF_ARQT;
hw->aq.arq.head = I40E_PF_ARQH;
hw->aq.arq.len = I40E_PF_ARQLEN;
}
}
/**
* i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
* @hw: pointer to the hardware structure
**/
static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
i40e_status ret_code;
ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
i40e_mem_atq_ring,
(hw->aq.num_asq_entries *
sizeof(struct i40e_aq_desc)),
I40E_ADMINQ_DESC_ALIGNMENT);
if (ret_code)
return ret_code;
ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
(hw->aq.num_asq_entries *
sizeof(struct i40e_asq_cmd_details)));
if (ret_code) {
i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
return ret_code;
}
return ret_code;
}
/**
* i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
* @hw: pointer to the hardware structure
**/
static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
i40e_status ret_code;
ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
i40e_mem_arq_ring,
(hw->aq.num_arq_entries *
sizeof(struct i40e_aq_desc)),
I40E_ADMINQ_DESC_ALIGNMENT);
return ret_code;
}
/**
* i40e_free_adminq_asq - Free Admin Queue send rings
* @hw: pointer to the hardware structure
*
* This assumes the posted send buffers have already been cleaned
* and de-allocated
**/
static void i40e_free_adminq_asq(struct i40e_hw *hw)
{
i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}
/**
* i40e_free_adminq_arq - Free Admin Queue receive rings
* @hw: pointer to the hardware structure
*
* This assumes the posted receive buffers have already been cleaned
* and de-allocated
**/
static void i40e_free_adminq_arq(struct i40e_hw *hw)
{
i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}
/**
* i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
* @hw: pointer to the hardware structure
**/
static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
i40e_status ret_code;
struct i40e_aq_desc *desc;
struct i40e_dma_mem *bi;
int i;
/* We'll be allocating the buffer info memory first, then we can
* allocate the mapped buffers for the event processing
*/
/* buffer_info structures do not need alignment */
ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
(hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
if (ret_code)
goto alloc_arq_bufs;
hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;
/* allocate the mapped buffers */
for (i = 0; i < hw->aq.num_arq_entries; i++) {
bi = &hw->aq.arq.r.arq_bi[i];
ret_code = i40e_allocate_dma_mem(hw, bi,
i40e_mem_arq_buf,
hw->aq.arq_buf_size,
I40E_ADMINQ_DESC_ALIGNMENT);
if (ret_code)
goto unwind_alloc_arq_bufs;
/* now configure the descriptors for use */
desc = I40E_ADMINQ_DESC(hw->aq.arq, i);
desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
desc->opcode = 0;
/* This is in accordance with Admin queue design, there is no
* register for buffer size configuration
*/
desc->datalen = cpu_to_le16((u16)bi->size);
desc->retval = 0;
desc->cookie_high = 0;
desc->cookie_low = 0;
desc->params.external.addr_high =
cpu_to_le32(upper_32_bits(bi->pa));
desc->params.external.addr_low =
cpu_to_le32(lower_32_bits(bi->pa));
desc->params.external.param0 = 0;
desc->params.external.param1 = 0;
}
alloc_arq_bufs:
return ret_code;
unwind_alloc_arq_bufs:
/* don't try to free the one that failed... */
i--;
for (; i >= 0; i--)
i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
return ret_code;
}
/**
* i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
* @hw: pointer to the hardware structure
**/
static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
i40e_status ret_code;
struct i40e_dma_mem *bi;
int i;
/* No mapped memory needed yet, just the buffer info structures */
ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
(hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
if (ret_code)
goto alloc_asq_bufs;
hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;
/* allocate the mapped buffers */
for (i = 0; i < hw->aq.num_asq_entries; i++) {
bi = &hw->aq.asq.r.asq_bi[i];
ret_code = i40e_allocate_dma_mem(hw, bi,
i40e_mem_asq_buf,
hw->aq.asq_buf_size,
I40E_ADMINQ_DESC_ALIGNMENT);
if (ret_code)
goto unwind_alloc_asq_bufs;
}
alloc_asq_bufs:
return ret_code;
unwind_alloc_asq_bufs:
/* don't try to free the one that failed... */
i--;
for (; i >= 0; i--)
i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
return ret_code;
}
/**
* i40e_free_arq_bufs - Free receive queue buffer info elements
* @hw: pointer to the hardware structure
**/
static void i40e_free_arq_bufs(struct i40e_hw *hw)
{
int i;
/* free descriptors */
for (i = 0; i < hw->aq.num_arq_entries; i++)
i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
/* free the descriptor memory */
i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
/* free the dma header */
i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
}
/**
* i40e_free_asq_bufs - Free send queue buffer info elements
* @hw: pointer to the hardware structure
**/
static void i40e_free_asq_bufs(struct i40e_hw *hw)
{
int i;
/* only unmap if the address is non-NULL */
for (i = 0; i < hw->aq.num_asq_entries; i++)
if (hw->aq.asq.r.asq_bi[i].pa)
i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
/* free the buffer info list */
i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
/* free the descriptor memory */
i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
/* free the dma header */
i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
}
/**
* i40e_config_asq_regs - configure ASQ registers
* @hw: pointer to the hardware structure
*
* Configure base address and length registers for the transmit queue
**/
static void i40e_config_asq_regs(struct i40e_hw *hw)
{
if (hw->mac.type == I40E_MAC_VF) {
/* configure the transmit queue */
wr32(hw, I40E_VF_ATQBAH1,
upper_32_bits(hw->aq.asq.desc_buf.pa));
wr32(hw, I40E_VF_ATQBAL1,
lower_32_bits(hw->aq.asq.desc_buf.pa));
wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries |
I40E_VF_ATQLEN1_ATQENABLE_MASK));
} else {
/* configure the transmit queue */
wr32(hw, I40E_PF_ATQBAH,
upper_32_bits(hw->aq.asq.desc_buf.pa));
wr32(hw, I40E_PF_ATQBAL,
lower_32_bits(hw->aq.asq.desc_buf.pa));
wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
I40E_PF_ATQLEN_ATQENABLE_MASK));
}
}
/**
* i40e_config_arq_regs - ARQ register configuration
* @hw: pointer to the hardware structure
*
* Configure base address and length registers for the receive (event) queue
**/
static void i40e_config_arq_regs(struct i40e_hw *hw)
{
if (hw->mac.type == I40E_MAC_VF) {
/* configure the receive queue */
wr32(hw, I40E_VF_ARQBAH1,
upper_32_bits(hw->aq.arq.desc_buf.pa));
wr32(hw, I40E_VF_ARQBAL1,
lower_32_bits(hw->aq.arq.desc_buf.pa));
wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries |
I40E_VF_ARQLEN1_ARQENABLE_MASK));
} else {
/* configure the receive queue */
wr32(hw, I40E_PF_ARQBAH,
upper_32_bits(hw->aq.arq.desc_buf.pa));
wr32(hw, I40E_PF_ARQBAL,
lower_32_bits(hw->aq.arq.desc_buf.pa));
wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
I40E_PF_ARQLEN_ARQENABLE_MASK));
}
/* Update tail in the HW to post pre-allocated buffers */
wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
}
/**
* i40e_init_asq - main initialization routine for ASQ
* @hw: pointer to the hardware structure
*
* This is the main initialization routine for the Admin Send Queue
* Prior to calling this function, drivers *MUST* set the following fields
* in the hw->aq structure:
* - hw->aq.num_asq_entries
* - hw->aq.asq_buf_size
*
* Do *NOT* hold the lock when calling this as the memory allocation routines
* called are not going to be atomic context safe
**/
static i40e_status i40e_init_asq(struct i40e_hw *hw)
{
i40e_status ret_code = 0;
if (hw->aq.asq.count > 0) {
/* queue already initialized */
ret_code = I40E_ERR_NOT_READY;
goto init_adminq_exit;
}
/* verify input for valid configuration */
if ((hw->aq.num_asq_entries == 0) ||
(hw->aq.asq_buf_size == 0)) {
ret_code = I40E_ERR_CONFIG;
goto init_adminq_exit;
}
hw->aq.asq.next_to_use = 0;
hw->aq.asq.next_to_clean = 0;
hw->aq.asq.count = hw->aq.num_asq_entries;
/* allocate the ring memory */
ret_code = i40e_alloc_adminq_asq_ring(hw);
if (ret_code)
goto init_adminq_exit;
/* allocate buffers in the rings */
ret_code = i40e_alloc_asq_bufs(hw);
if (ret_code)
goto init_adminq_free_rings;
/* initialize base registers */
i40e_config_asq_regs(hw);
/* success! */
goto init_adminq_exit;
init_adminq_free_rings:
i40e_free_adminq_asq(hw);
init_adminq_exit:
return ret_code;
}
/**
* i40e_init_arq - initialize ARQ
* @hw: pointer to the hardware structure
*
* The main initialization routine for the Admin Receive (Event) Queue.
* Prior to calling this function, drivers *MUST* set the following fields
* in the hw->aq structure:
* - hw->aq.num_arq_entries
* - hw->aq.arq_buf_size
*
* Do *NOT* hold the lock when calling this as the memory allocation routines
* called are not going to be atomic context safe
**/
static i40e_status i40e_init_arq(struct i40e_hw *hw)
{
i40e_status ret_code = 0;
if (hw->aq.arq.count > 0) {
/* queue already initialized */
ret_code = I40E_ERR_NOT_READY;
goto init_adminq_exit;
}
/* verify input for valid configuration */
if ((hw->aq.num_arq_entries == 0) ||
(hw->aq.arq_buf_size == 0)) {
ret_code = I40E_ERR_CONFIG;
goto init_adminq_exit;
}
hw->aq.arq.next_to_use = 0;
hw->aq.arq.next_to_clean = 0;
hw->aq.arq.count = hw->aq.num_arq_entries;
/* allocate the ring memory */
ret_code = i40e_alloc_adminq_arq_ring(hw);
if (ret_code)
goto init_adminq_exit;
/* allocate buffers in the rings */
ret_code = i40e_alloc_arq_bufs(hw);
if (ret_code)
goto init_adminq_free_rings;
/* initialize base registers */
i40e_config_arq_regs(hw);
/* success! */
goto init_adminq_exit;
init_adminq_free_rings:
i40e_free_adminq_arq(hw);
init_adminq_exit:
return ret_code;
}
/**
* i40e_shutdown_asq - shutdown the ASQ
* @hw: pointer to the hardware structure
*
* The main shutdown routine for the Admin Send Queue
**/
static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
{
i40e_status ret_code = 0;
if (hw->aq.asq.count == 0)
return I40E_ERR_NOT_READY;
/* Stop firmware AdminQ processing */
wr32(hw, hw->aq.asq.head, 0);
wr32(hw, hw->aq.asq.tail, 0);
wr32(hw, hw->aq.asq.len, 0);
/* make sure lock is available */
mutex_lock(&hw->aq.asq_mutex);
hw->aq.asq.count = 0; /* to indicate uninitialized queue */
/* free ring buffers */
i40e_free_asq_bufs(hw);
mutex_unlock(&hw->aq.asq_mutex);
return ret_code;
}
/**
* i40e_shutdown_arq - shutdown ARQ
* @hw: pointer to the hardware structure
*
* The main shutdown routine for the Admin Receive Queue
**/
static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
{
i40e_status ret_code = 0;
if (hw->aq.arq.count == 0)
return I40E_ERR_NOT_READY;
/* Stop firmware AdminQ processing */
wr32(hw, hw->aq.arq.head, 0);
wr32(hw, hw->aq.arq.tail, 0);
wr32(hw, hw->aq.arq.len, 0);
/* make sure lock is available */
mutex_lock(&hw->aq.arq_mutex);
hw->aq.arq.count = 0; /* to indicate uninitialized queue */
/* free ring buffers */
i40e_free_arq_bufs(hw);
mutex_unlock(&hw->aq.arq_mutex);
return ret_code;
}
/**
* i40evf_init_adminq - main initialization routine for Admin Queue
* @hw: pointer to the hardware structure
*
* Prior to calling this function, drivers *MUST* set the following fields
* in the hw->aq structure:
* - hw->aq.num_asq_entries
* - hw->aq.num_arq_entries
* - hw->aq.arq_buf_size
* - hw->aq.asq_buf_size
**/
i40e_status i40evf_init_adminq(struct i40e_hw *hw)
{
i40e_status ret_code;
/* verify input for valid configuration */
if ((hw->aq.num_arq_entries == 0) ||
(hw->aq.num_asq_entries == 0) ||
(hw->aq.arq_buf_size == 0) ||
(hw->aq.asq_buf_size == 0)) {
ret_code = I40E_ERR_CONFIG;
goto init_adminq_exit;
}
/* initialize locks */
mutex_init(&hw->aq.asq_mutex);
mutex_init(&hw->aq.arq_mutex);
/* Set up register offsets */
i40e_adminq_init_regs(hw);
/* allocate the ASQ */
ret_code = i40e_init_asq(hw);
if (ret_code)
goto init_adminq_destroy_locks;
/* allocate the ARQ */
ret_code = i40e_init_arq(hw);
if (ret_code)
goto init_adminq_free_asq;
/* success! */
goto init_adminq_exit;
init_adminq_free_asq:
i40e_shutdown_asq(hw);
init_adminq_destroy_locks:
init_adminq_exit:
return ret_code;
}
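/**
 * i40e_example_bringup_aq - minimal usage sketch for i40evf_init_adminq
 * @hw: pointer to the hardware structure
 *
 * Illustrative sketch only: the helper name and the queue depths/buffer sizes
 * below are hypothetical placeholders, shown to make the required pre-init
 * fields listed above concrete.
 **/
static i40e_status i40e_example_bringup_aq(struct i40e_hw *hw)
{
	/* all four fields must be non-zero before i40evf_init_adminq() */
	hw->aq.num_arq_entries = 32;
	hw->aq.num_asq_entries = 32;
	hw->aq.arq_buf_size = 4096;
	hw->aq.asq_buf_size = 4096;
	return i40evf_init_adminq(hw);
}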
/**
* i40evf_shutdown_adminq - shutdown routine for the Admin Queue
* @hw: pointer to the hardware structure
**/
i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw)
{
i40e_status ret_code = 0;
if (i40evf_check_asq_alive(hw))
i40evf_aq_queue_shutdown(hw, true);
i40e_shutdown_asq(hw);
i40e_shutdown_arq(hw);
/* destroy the locks */
return ret_code;
}
/**
* i40e_clean_asq - cleans Admin send queue
* @hw: pointer to the hardware structure
*
* returns the number of free desc
**/
static u16 i40e_clean_asq(struct i40e_hw *hw)
{
struct i40e_adminq_ring *asq = &(hw->aq.asq);
struct i40e_asq_cmd_details *details;
u16 ntc = asq->next_to_clean;
struct i40e_aq_desc desc_cb;
struct i40e_aq_desc *desc;
desc = I40E_ADMINQ_DESC(*asq, ntc);
details = I40E_ADMINQ_DETAILS(*asq, ntc);
while (rd32(hw, hw->aq.asq.head) != ntc) {
if (details->callback) {
I40E_ADMINQ_CALLBACK cb_func =
(I40E_ADMINQ_CALLBACK)details->callback;
desc_cb = *desc;
cb_func(hw, &desc_cb);
}
memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
memset((void *)details, 0,
sizeof(struct i40e_asq_cmd_details));
ntc++;
if (ntc == asq->count)
ntc = 0;
desc = I40E_ADMINQ_DESC(*asq, ntc);
details = I40E_ADMINQ_DETAILS(*asq, ntc);
}
asq->next_to_clean = ntc;
return I40E_DESC_UNUSED(asq);
}
/**
* i40evf_asq_done - check if FW has processed the Admin Send Queue
* @hw: pointer to the hw struct
*
* Returns true if the firmware has processed all descriptors on the
* admin send queue. Returns false if there are still requests pending.
**/
bool i40evf_asq_done(struct i40e_hw *hw)
{
/* AQ designers suggest use of head for better
* timing reliability than DD bit
*/
return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}
/**
* i40evf_asq_send_command - send command to Admin Queue
* @hw: pointer to the hw struct
* @desc: prefilled descriptor describing the command (non DMA mem)
* @buff: buffer to use for indirect commands
* @buff_size: size of buffer for indirect commands
* @cmd_details: pointer to command details structure
*
* This is the main send command driver routine for the Admin Queue send
* queue. It runs the queue, cleans the queue, etc
**/
i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
struct i40e_aq_desc *desc,
void *buff, /* can be NULL */
u16 buff_size,
struct i40e_asq_cmd_details *cmd_details)
{
i40e_status status = 0;
struct i40e_dma_mem *dma_buff = NULL;
struct i40e_asq_cmd_details *details;
struct i40e_aq_desc *desc_on_ring;
bool cmd_completed = false;
u16 retval = 0;
if (hw->aq.asq.count == 0) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"AQTX: Admin queue not initialized.\n");
status = I40E_ERR_QUEUE_EMPTY;
goto asq_send_command_exit;
}
details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
if (cmd_details) {
*details = *cmd_details;
/* If the cmd_details are defined copy the cookie. The
* cpu_to_le32 is not needed here because the data is ignored
* by the FW, only used by the driver
*/
if (details->cookie) {
desc->cookie_high =
cpu_to_le32(upper_32_bits(details->cookie));
desc->cookie_low =
cpu_to_le32(lower_32_bits(details->cookie));
}
} else {
memset(details, 0, sizeof(struct i40e_asq_cmd_details));
}
/* clear requested flags and then set additional flags if defined */
desc->flags &= ~cpu_to_le16(details->flags_dis);
desc->flags |= cpu_to_le16(details->flags_ena);
mutex_lock(&hw->aq.asq_mutex);
if (buff_size > hw->aq.asq_buf_size) {
i40e_debug(hw,
I40E_DEBUG_AQ_MESSAGE,
"AQTX: Invalid buffer size: %d.\n",
buff_size);
status = I40E_ERR_INVALID_SIZE;
goto asq_send_command_error;
}
if (details->postpone && !details->async) {
i40e_debug(hw,
I40E_DEBUG_AQ_MESSAGE,
"AQTX: Async flag not set along with postpone flag");
status = I40E_ERR_PARAM;
goto asq_send_command_error;
}
/* call clean and check queue available function to reclaim the
* descriptors that were processed by FW, the function returns the
* number of desc available
*/
/* the clean function called here could be called in a separate thread
* in case of asynchronous completions
*/
if (i40e_clean_asq(hw) == 0) {
i40e_debug(hw,
I40E_DEBUG_AQ_MESSAGE,
"AQTX: Error queue is full.\n");
status = I40E_ERR_ADMIN_QUEUE_FULL;
goto asq_send_command_error;
}
/* initialize the temp desc pointer with the right desc */
desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
/* if the desc is available copy the temp desc to the right place */
*desc_on_ring = *desc;
/* if buff is not NULL assume indirect command */
if (buff != NULL) {
dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
/* copy the user buff into the respective DMA buff */
memcpy(dma_buff->va, buff, buff_size);
desc_on_ring->datalen = cpu_to_le16(buff_size);
/* Update the address values in the desc with the pa value
* for respective buffer
*/
desc_on_ring->params.external.addr_high =
cpu_to_le32(upper_32_bits(dma_buff->pa));
desc_on_ring->params.external.addr_low =
cpu_to_le32(lower_32_bits(dma_buff->pa));
}
/* bump the tail */
i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, buff);
(hw->aq.asq.next_to_use)++;
if (hw->aq.asq.next_to_use == hw->aq.asq.count)
hw->aq.asq.next_to_use = 0;
if (!details->postpone)
wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
/* if cmd_details are not defined or async flag is not set,
* we need to wait for desc write back
*/
if (!details->async && !details->postpone) {
u32 total_delay = 0;
u32 delay_len = 10;
do {
/* AQ designers suggest use of head for better
* timing reliability than DD bit
*/
if (i40evf_asq_done(hw))
break;
/* ugh! delay while spin_lock */
udelay(delay_len);
total_delay += delay_len;
} while (total_delay < I40E_ASQ_CMD_TIMEOUT);
}
/* if ready, copy the desc back to temp */
if (i40evf_asq_done(hw)) {
*desc = *desc_on_ring;
if (buff != NULL)
memcpy(buff, dma_buff->va, buff_size);
retval = le16_to_cpu(desc->retval);
if (retval != 0) {
i40e_debug(hw,
I40E_DEBUG_AQ_MESSAGE,
"AQTX: Command completed with error 0x%X.\n",
retval);
/* strip off FW internal code */
retval &= 0xff;
}
cmd_completed = true;
if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
status = 0;
else
status = I40E_ERR_ADMIN_QUEUE_ERROR;
hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
}
/* update the error if time out occurred */
if ((!cmd_completed) &&
(!details->async && !details->postpone)) {
i40e_debug(hw,
I40E_DEBUG_AQ_MESSAGE,
"AQTX: Writeback timeout.\n");
status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
}
asq_send_command_error:
mutex_unlock(&hw->aq.asq_mutex);
asq_send_command_exit:
return status;
}
/**
* i40evf_fill_default_direct_cmd_desc - AQ descriptor helper function
* @desc: pointer to the temp descriptor (non DMA mem)
* @opcode: the opcode can be used to decide which flags to turn off or on
*
* Fill the desc with default values
**/
void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
u16 opcode)
{
/* zero out the desc */
memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
desc->opcode = cpu_to_le16(opcode);
desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI);
}
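/**
 * i40e_example_send_get_version - minimal sketch of a direct AQ command
 * @hw: pointer to the hw struct
 *
 * Illustrative sketch only (the function name is hypothetical): a direct
 * command is built with i40evf_fill_default_direct_cmd_desc() and handed to
 * i40evf_asq_send_command() with a NULL buffer and zero length.
 **/
static i40e_status i40e_example_send_get_version(struct i40e_hw *hw)
{
	struct i40e_aq_desc desc;

	i40evf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);
	/* NULL buff / 0 size marks this as a direct (16-byte only) command */
	return i40evf_asq_send_command(hw, &desc, NULL, 0, NULL);
}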
/**
* i40evf_clean_arq_element
* @hw: pointer to the hw struct
* @e: event info from the receive descriptor, includes any buffers
* @pending: number of events that could be left to process
*
* This function cleans one Admin Receive Queue element and returns
* the contents through e. It can also return how many events are
* left to process through 'pending'
**/
i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
struct i40e_arq_event_info *e,
u16 *pending)
{
i40e_status ret_code = 0;
u16 ntc = hw->aq.arq.next_to_clean;
struct i40e_aq_desc *desc;
struct i40e_dma_mem *bi;
u16 desc_idx;
u16 datalen;
u16 flags;
u16 ntu;
/* take the lock before we start messing with the ring */
mutex_lock(&hw->aq.arq_mutex);
/* set next_to_use to head */
ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
if (ntu == ntc) {
/* nothing to do - shouldn't need to update ring's values */
i40e_debug(hw,
I40E_DEBUG_AQ_MESSAGE,
"AQRX: Queue is empty.\n");
ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
goto clean_arq_element_out;
}
/* now clean the next descriptor */
desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
desc_idx = ntc;
i40evf_debug_aq(hw,
I40E_DEBUG_AQ_COMMAND,
(void *)desc,
hw->aq.arq.r.arq_bi[desc_idx].va);
flags = le16_to_cpu(desc->flags);
if (flags & I40E_AQ_FLAG_ERR) {
ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
hw->aq.arq_last_status =
(enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
i40e_debug(hw,
I40E_DEBUG_AQ_MESSAGE,
"AQRX: Event received with error 0x%X.\n",
hw->aq.arq_last_status);
} else {
e->desc = *desc;
datalen = le16_to_cpu(desc->datalen);
e->msg_size = min(datalen, e->msg_size);
if (e->msg_buf != NULL && (e->msg_size != 0))
memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
e->msg_size);
}
/* Restore the original datalen and buffer address in the desc,
* FW updates datalen to indicate the event message
* size
*/
bi = &hw->aq.arq.r.arq_bi[ntc];
memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
desc->datalen = cpu_to_le16((u16)bi->size);
desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
/* set tail = the last cleaned desc index. */
wr32(hw, hw->aq.arq.tail, ntc);
/* ntc is updated to tail + 1 */
ntc++;
if (ntc == hw->aq.num_arq_entries)
ntc = 0;
hw->aq.arq.next_to_clean = ntc;
hw->aq.arq.next_to_use = ntu;
clean_arq_element_out:
/* Set pending if needed, unlock and return */
if (pending != NULL)
*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
mutex_unlock(&hw->aq.arq_mutex);
return ret_code;
}
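/**
 * i40e_example_drain_arq - minimal sketch of an ARQ event loop
 * @hw: pointer to the hw struct
 * @msg: caller-provided message buffer
 * @msg_size: size of @msg in bytes
 *
 * Illustrative sketch only (names are hypothetical): callers drain the ARQ by
 * repeatedly invoking i40evf_clean_arq_element() until it reports no work;
 * 'pending' says how many further events the firmware has already posted.
 **/
static void i40e_example_drain_arq(struct i40e_hw *hw, u8 *msg, u16 msg_size)
{
	struct i40e_arq_event_info event;
	u16 pending;

	event.msg_buf = msg;
	do {
		event.msg_size = msg_size; /* reset; the clean call shrinks it */
		if (i40evf_clean_arq_element(hw, &event, &pending))
			break; /* no work left, or an error was returned */
		/* event.desc and event.msg_buf now hold one firmware event */
	} while (pending);
}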
/**
 * i40evf_resume_aq - resume AQ processing from 0
 * @hw: pointer to the hardware structure
 **/
void i40evf_resume_aq(struct i40e_hw *hw)
{
/* Registers are reset after PF reset */
hw->aq.asq.next_to_use = 0;
hw->aq.asq.next_to_clean = 0;
i40e_config_asq_regs(hw);
hw->aq.arq.next_to_use = 0;
hw->aq.arq.next_to_clean = 0;
i40e_config_arq_regs(hw);
}
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
* Copyright(c) 2013 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
******************************************************************************/
#ifndef _I40E_ADMINQ_H_
#define _I40E_ADMINQ_H_
#include "i40e_osdep.h"
#include "i40e_adminq_cmd.h"
#define I40E_ADMINQ_DESC(R, i) \
(&(((struct i40e_aq_desc *)((R).desc_buf.va))[i]))
#define I40E_ADMINQ_DESC_ALIGNMENT 4096
struct i40e_adminq_ring {
struct i40e_virt_mem dma_head; /* space for dma structures */
struct i40e_dma_mem desc_buf; /* descriptor ring memory */
struct i40e_virt_mem cmd_buf; /* command buffer memory */
union {
struct i40e_dma_mem *asq_bi;
struct i40e_dma_mem *arq_bi;
} r;
u16 count; /* Number of descriptors */
u16 rx_buf_len; /* Admin Receive Queue buffer length */
/* used for interrupt processing */
u16 next_to_use;
u16 next_to_clean;
/* used for queue tracking */
u32 head;
u32 tail;
u32 len;
};
/* ASQ transaction details */
struct i40e_asq_cmd_details {
void *callback; /* cast from type I40E_ADMINQ_CALLBACK */
u64 cookie;
u16 flags_ena;
u16 flags_dis;
bool async;
bool postpone;
};
#define I40E_ADMINQ_DETAILS(R, i) \
(&(((struct i40e_asq_cmd_details *)((R).cmd_buf.va))[i]))
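/* Minimal sketch of how the two macros above are used (the helper name is
 * hypothetical): desc_buf.va and cmd_buf.va are treated as plain arrays, so
 * walking a ring is index arithmetic with a manual wrap at ring->count.
 */
static inline struct i40e_aq_desc *
i40e_example_next_desc(struct i40e_adminq_ring *ring)
{
	struct i40e_aq_desc *desc = I40E_ADMINQ_DESC(*ring, ring->next_to_use);

	ring->next_to_use++;
	if (ring->next_to_use == ring->count)
		ring->next_to_use = 0;
	return desc;
}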
/* ARQ event information */
struct i40e_arq_event_info {
struct i40e_aq_desc desc;
u16 msg_size;
u8 *msg_buf;
};
/* Admin Queue information */
struct i40e_adminq_info {
struct i40e_adminq_ring arq; /* receive queue */
struct i40e_adminq_ring asq; /* send queue */
u16 num_arq_entries; /* receive queue depth */
u16 num_asq_entries; /* send queue depth */
u16 arq_buf_size; /* receive queue buffer size */
u16 asq_buf_size; /* send queue buffer size */
u16 fw_maj_ver; /* firmware major version */
u16 fw_min_ver; /* firmware minor version */
u16 api_maj_ver; /* api major version */
u16 api_min_ver; /* api minor version */
struct mutex asq_mutex; /* Send queue lock */
struct mutex arq_mutex; /* Receive queue lock */
/* last status values on send and receive queues */
enum i40e_admin_queue_err asq_last_status;
enum i40e_admin_queue_err arq_last_status;
};
/* general information */
#define I40E_AQ_LARGE_BUF 512
#define I40E_ASQ_CMD_TIMEOUT 100000 /* usecs */
void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
u16 opcode);
#endif /* _I40E_ADMINQ_H_ */
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
* Copyright(c) 2013 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
******************************************************************************/
#ifndef _I40E_ADMINQ_CMD_H_
#define _I40E_ADMINQ_CMD_H_
/* This header file defines the i40e Admin Queue commands and is shared between
* i40e Firmware and Software.
*
* This file needs to comply with the Linux Kernel coding style.
*/
#define I40E_FW_API_VERSION_MAJOR 0x0001
#define I40E_FW_API_VERSION_MINOR 0x0001
struct i40e_aq_desc {
__le16 flags;
__le16 opcode;
__le16 datalen;
__le16 retval;
__le32 cookie_high;
__le32 cookie_low;
union {
struct {
__le32 param0;
__le32 param1;
__le32 param2;
__le32 param3;
} internal;
struct {
__le32 param0;
__le32 param1;
__le32 addr_high;
__le32 addr_low;
} external;
u8 raw[16];
} params;
};
/* Flags sub-structure
* |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
* |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
*/
/* command flags and offsets*/
#define I40E_AQ_FLAG_DD_SHIFT 0
#define I40E_AQ_FLAG_CMP_SHIFT 1
#define I40E_AQ_FLAG_ERR_SHIFT 2
#define I40E_AQ_FLAG_VFE_SHIFT 3
#define I40E_AQ_FLAG_LB_SHIFT 9
#define I40E_AQ_FLAG_RD_SHIFT 10
#define I40E_AQ_FLAG_VFC_SHIFT 11
#define I40E_AQ_FLAG_BUF_SHIFT 12
#define I40E_AQ_FLAG_SI_SHIFT 13
#define I40E_AQ_FLAG_EI_SHIFT 14
#define I40E_AQ_FLAG_FE_SHIFT 15
#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */
#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */
#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */
#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */
#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */
#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */
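/* Minimal sketch of how these flags combine for an indirect command (the
 * helper name and the cutoff parameter are hypothetical): BUF marks an
 * attached buffer, RD marks a driver-to-firmware payload, and LB is added for
 * buffers above the driver's large-buffer cutoff, mirroring the admin queue
 * code in this patch.
 */
static inline void i40e_example_set_indirect_flags(struct i40e_aq_desc *desc,
						   u16 buf_size,
						   u16 large_buf_cutoff)
{
	desc->flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
	if (buf_size > large_buf_cutoff)
		desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
	desc->datalen = cpu_to_le16(buf_size);
}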
/* error codes */
enum i40e_admin_queue_err {
I40E_AQ_RC_OK = 0, /* success */
I40E_AQ_RC_EPERM = 1, /* Operation not permitted */
I40E_AQ_RC_ENOENT = 2, /* No such element */
I40E_AQ_RC_ESRCH = 3, /* Bad opcode */
I40E_AQ_RC_EINTR = 4, /* operation interrupted */
I40E_AQ_RC_EIO = 5, /* I/O error */
I40E_AQ_RC_ENXIO = 6, /* No such resource */
I40E_AQ_RC_E2BIG = 7, /* Arg too long */
I40E_AQ_RC_EAGAIN = 8, /* Try again */
I40E_AQ_RC_ENOMEM = 9, /* Out of memory */
I40E_AQ_RC_EACCES = 10, /* Permission denied */
I40E_AQ_RC_EFAULT = 11, /* Bad address */
I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */
I40E_AQ_RC_EEXIST = 13, /* object already exists */
I40E_AQ_RC_EINVAL = 14, /* Invalid argument */
I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */
I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */
I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */
I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */
I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed because of prev cmd error */
I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
I40E_AQ_RC_EFBIG = 22, /* File too large */
};
/* Admin Queue command opcodes */
enum i40e_admin_queue_opc {
/* aq commands */
i40e_aqc_opc_get_version = 0x0001,
i40e_aqc_opc_driver_version = 0x0002,
i40e_aqc_opc_queue_shutdown = 0x0003,
/* resource ownership */
i40e_aqc_opc_request_resource = 0x0008,
i40e_aqc_opc_release_resource = 0x0009,
i40e_aqc_opc_list_func_capabilities = 0x000A,
i40e_aqc_opc_list_dev_capabilities = 0x000B,
i40e_aqc_opc_set_cppm_configuration = 0x0103,
i40e_aqc_opc_set_arp_proxy_entry = 0x0104,
i40e_aqc_opc_set_ns_proxy_entry = 0x0105,
/* LAA */
i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */
i40e_aqc_opc_mac_address_read = 0x0107,
i40e_aqc_opc_mac_address_write = 0x0108,
/* PXE */
i40e_aqc_opc_clear_pxe_mode = 0x0110,
/* internal switch commands */
i40e_aqc_opc_get_switch_config = 0x0200,
i40e_aqc_opc_add_statistics = 0x0201,
i40e_aqc_opc_remove_statistics = 0x0202,
i40e_aqc_opc_set_port_parameters = 0x0203,
i40e_aqc_opc_get_switch_resource_alloc = 0x0204,
i40e_aqc_opc_add_vsi = 0x0210,
i40e_aqc_opc_update_vsi_parameters = 0x0211,
i40e_aqc_opc_get_vsi_parameters = 0x0212,
i40e_aqc_opc_add_pv = 0x0220,
i40e_aqc_opc_update_pv_parameters = 0x0221,
i40e_aqc_opc_get_pv_parameters = 0x0222,
i40e_aqc_opc_add_veb = 0x0230,
i40e_aqc_opc_update_veb_parameters = 0x0231,
i40e_aqc_opc_get_veb_parameters = 0x0232,
i40e_aqc_opc_delete_element = 0x0243,
i40e_aqc_opc_add_macvlan = 0x0250,
i40e_aqc_opc_remove_macvlan = 0x0251,
i40e_aqc_opc_add_vlan = 0x0252,
i40e_aqc_opc_remove_vlan = 0x0253,
i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254,
i40e_aqc_opc_add_tag = 0x0255,
i40e_aqc_opc_remove_tag = 0x0256,
i40e_aqc_opc_add_multicast_etag = 0x0257,
i40e_aqc_opc_remove_multicast_etag = 0x0258,
i40e_aqc_opc_update_tag = 0x0259,
i40e_aqc_opc_add_control_packet_filter = 0x025A,
i40e_aqc_opc_remove_control_packet_filter = 0x025B,
i40e_aqc_opc_add_cloud_filters = 0x025C,
i40e_aqc_opc_remove_cloud_filters = 0x025D,
i40e_aqc_opc_add_mirror_rule = 0x0260,
i40e_aqc_opc_delete_mirror_rule = 0x0261,
i40e_aqc_opc_set_storm_control_config = 0x0280,
i40e_aqc_opc_get_storm_control_config = 0x0281,
/* DCB commands */
i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
i40e_aqc_opc_dcb_updated = 0x0302,
/* TX scheduler */
i40e_aqc_opc_configure_vsi_bw_limit = 0x0400,
i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406,
i40e_aqc_opc_configure_vsi_tc_bw = 0x0407,
i40e_aqc_opc_query_vsi_bw_config = 0x0408,
i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A,
i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410,
i40e_aqc_opc_enable_switching_comp_ets = 0x0413,
i40e_aqc_opc_modify_switching_comp_ets = 0x0414,
i40e_aqc_opc_disable_switching_comp_ets = 0x0415,
i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416,
i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417,
i40e_aqc_opc_query_switching_comp_ets_config = 0x0418,
i40e_aqc_opc_query_port_ets_config = 0x0419,
i40e_aqc_opc_query_switching_comp_bw_config = 0x041A,
i40e_aqc_opc_suspend_port_tx = 0x041B,
i40e_aqc_opc_resume_port_tx = 0x041C,
/* hmc */
i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
i40e_aqc_opc_set_hmc_resource_profile = 0x0501,
/* phy commands*/
i40e_aqc_opc_get_phy_abilities = 0x0600,
i40e_aqc_opc_set_phy_config = 0x0601,
i40e_aqc_opc_set_mac_config = 0x0603,
i40e_aqc_opc_set_link_restart_an = 0x0605,
i40e_aqc_opc_get_link_status = 0x0607,
i40e_aqc_opc_set_phy_int_mask = 0x0613,
i40e_aqc_opc_get_local_advt_reg = 0x0614,
i40e_aqc_opc_set_local_advt_reg = 0x0615,
i40e_aqc_opc_get_partner_advt = 0x0616,
i40e_aqc_opc_set_lb_modes = 0x0618,
i40e_aqc_opc_get_phy_wol_caps = 0x0621,
i40e_aqc_opc_set_phy_reset = 0x0622,
i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
/* NVM commands */
i40e_aqc_opc_nvm_read = 0x0701,
i40e_aqc_opc_nvm_erase = 0x0702,
i40e_aqc_opc_nvm_update = 0x0703,
/* virtualization commands */
i40e_aqc_opc_send_msg_to_pf = 0x0801,
i40e_aqc_opc_send_msg_to_vf = 0x0802,
i40e_aqc_opc_send_msg_to_peer = 0x0803,
/* alternate structure */
i40e_aqc_opc_alternate_write = 0x0900,
i40e_aqc_opc_alternate_write_indirect = 0x0901,
i40e_aqc_opc_alternate_read = 0x0902,
i40e_aqc_opc_alternate_read_indirect = 0x0903,
i40e_aqc_opc_alternate_write_done = 0x0904,
i40e_aqc_opc_alternate_set_mode = 0x0905,
i40e_aqc_opc_alternate_clear_port = 0x0906,
/* LLDP commands */
i40e_aqc_opc_lldp_get_mib = 0x0A00,
i40e_aqc_opc_lldp_update_mib = 0x0A01,
i40e_aqc_opc_lldp_add_tlv = 0x0A02,
i40e_aqc_opc_lldp_update_tlv = 0x0A03,
i40e_aqc_opc_lldp_delete_tlv = 0x0A04,
i40e_aqc_opc_lldp_stop = 0x0A05,
i40e_aqc_opc_lldp_start = 0x0A06,
/* Tunnel commands */
i40e_aqc_opc_add_udp_tunnel = 0x0B00,
i40e_aqc_opc_del_udp_tunnel = 0x0B01,
i40e_aqc_opc_tunnel_key_structure = 0x0B10,
/* Async Events */
i40e_aqc_opc_event_lan_overflow = 0x1001,
/* OEM commands */
i40e_aqc_opc_oem_parameter_change = 0xFE00,
i40e_aqc_opc_oem_device_status_change = 0xFE01,
/* debug commands */
i40e_aqc_opc_debug_get_deviceid = 0xFF00,
i40e_aqc_opc_debug_set_mode = 0xFF01,
i40e_aqc_opc_debug_read_reg = 0xFF03,
i40e_aqc_opc_debug_write_reg = 0xFF04,
i40e_aqc_opc_debug_read_reg_sg = 0xFF05,
i40e_aqc_opc_debug_write_reg_sg = 0xFF06,
i40e_aqc_opc_debug_modify_reg = 0xFF07,
i40e_aqc_opc_debug_dump_internals = 0xFF08,
i40e_aqc_opc_debug_modify_internals = 0xFF09,
};
/* command structures and indirect data structures */
/* Structure naming conventions:
* - no suffix for direct command descriptor structures
* - _data for indirect sent data
* - _resp for indirect return data (data which is both will use _data)
* - _completion for direct return data
* - _element_ for repeated elements (may also be _data or _resp)
*
* Command structures are expected to overlay the params.raw member of the basic
* descriptor, and as such cannot exceed 16 bytes in length.
*/
/* This macro is used to generate a compilation error if a structure
* is not exactly the correct length. It gives a divide by zero error if the
* structure is not of the correct size, otherwise it creates an enum that is
* never used.
*/
#define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \
{ i40e_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
/* This macro is used extensively to ensure that command structures are 16
* bytes in length as they have to map to the raw array of that size.
*/
#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X)
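/* Minimal sketch of the check in action: a hypothetical 16-byte direct
 * command passes; if a field were added, the divisor in I40E_CHECK_STRUCT_LEN
 * would become zero and the build would fail on this line.
 */
struct i40e_example_direct_cmd {
	__le16 flags;
	u8 reserved[14];
};
I40E_CHECK_CMD_LENGTH(i40e_example_direct_cmd);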
/* internal (0x00XX) commands */
/* Get version (direct 0x0001) */
struct i40e_aqc_get_version {
__le32 rom_ver;
__le32 fw_build;
__le16 fw_major;
__le16 fw_minor;
__le16 api_major;
__le16 api_minor;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version);
/* Send driver version (indirect 0x0002) */
struct i40e_aqc_driver_version {
u8 driver_major_ver;
u8 driver_minor_ver;
u8 driver_build_ver;
u8 driver_subbuild_ver;
u8 reserved[4];
__le32 address_high;
__le32 address_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version);
/* Queue Shutdown (direct 0x0003) */
struct i40e_aqc_queue_shutdown {
__le32 driver_unloading;
#define I40E_AQ_DRIVER_UNLOADING 0x1
u8 reserved[12];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
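/* Minimal sketch of the params.raw overlay convention described above (the
 * helper name is hypothetical): direct command structs are laid over the
 * 16-byte parameter area of the descriptor rather than copied into it.
 */
static inline void i40e_example_build_queue_shutdown(struct i40e_aq_desc *desc,
						     bool unloading)
{
	struct i40e_aqc_queue_shutdown *cmd =
		(struct i40e_aqc_queue_shutdown *)&desc->params.raw;

	memset(desc, 0, sizeof(*desc));
	desc->opcode = cpu_to_le16(i40e_aqc_opc_queue_shutdown);
	desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI);
	if (unloading)
		cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
}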
/* Request resource ownership (direct 0x0008)
* Release resource ownership (direct 0x0009)
*/
#define I40E_AQ_RESOURCE_NVM 1
#define I40E_AQ_RESOURCE_SDP 2
#define I40E_AQ_RESOURCE_ACCESS_READ 1
#define I40E_AQ_RESOURCE_ACCESS_WRITE 2
#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000
#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000
struct i40e_aqc_request_resource {
__le16 resource_id;
__le16 access_type;
__le32 timeout;
__le32 resource_number;
u8 reserved[4];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource);
/* Get function capabilities (indirect 0x000A)
* Get device capabilities (indirect 0x000B)
*/
struct i40e_aqc_list_capabilites {
u8 command_flags;
#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1
u8 pf_index;
u8 reserved[2];
__le32 count;
__le32 addr_high;
__le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites);
struct i40e_aqc_list_capabilities_element_resp {
__le16 id;
u8 major_rev;
u8 minor_rev;
__le32 number;
__le32 logical_id;
__le32 phys_id;
u8 reserved[16];
};
/* list of caps */
#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001
#define I40E_AQ_CAP_ID_MNG_MODE 0x0002
#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003
#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004
#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005
#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006
#define I40E_AQ_CAP_ID_SRIOV 0x0012
#define I40E_AQ_CAP_ID_VF 0x0013
#define I40E_AQ_CAP_ID_VMDQ 0x0014
#define I40E_AQ_CAP_ID_8021QBG 0x0015
#define I40E_AQ_CAP_ID_8021QBR 0x0016
#define I40E_AQ_CAP_ID_VSI 0x0017
#define I40E_AQ_CAP_ID_DCB 0x0018
#define I40E_AQ_CAP_ID_FCOE 0x0021
#define I40E_AQ_CAP_ID_RSS 0x0040
#define I40E_AQ_CAP_ID_RXQ 0x0041
#define I40E_AQ_CAP_ID_TXQ 0x0042
#define I40E_AQ_CAP_ID_MSIX 0x0043
#define I40E_AQ_CAP_ID_VF_MSIX 0x0044
#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045
#define I40E_AQ_CAP_ID_1588 0x0046
#define I40E_AQ_CAP_ID_IWARP 0x0051
#define I40E_AQ_CAP_ID_LED 0x0061
#define I40E_AQ_CAP_ID_SDP 0x0062
#define I40E_AQ_CAP_ID_MDIO 0x0063
#define I40E_AQ_CAP_ID_FLEX10 0x00F1
#define I40E_AQ_CAP_ID_CEM 0x00F2
/* Set CPPM Configuration (direct 0x0103) */
struct i40e_aqc_cppm_configuration {
__le16 command_flags;
#define I40E_AQ_CPPM_EN_LTRC 0x0800
#define I40E_AQ_CPPM_EN_DMCTH 0x1000
#define I40E_AQ_CPPM_EN_DMCTLX 0x2000
#define I40E_AQ_CPPM_EN_HPTC 0x4000
#define I40E_AQ_CPPM_EN_DMARC 0x8000
__le16 ttlx;
__le32 dmacr;
__le16 dmcth;
u8 hptc;
u8 reserved;
__le32 pfltrc;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration);
/* Set ARP Proxy command / response (indirect 0x0104) */
struct i40e_aqc_arp_proxy_data {
__le16 command_flags;
#define I40E_AQ_ARP_INIT_IPV4 0x0008
#define I40E_AQ_ARP_UNSUP_CTL 0x0010
#define I40E_AQ_ARP_ENA 0x0020
#define I40E_AQ_ARP_ADD_IPV4 0x0040
#define I40E_AQ_ARP_DEL_IPV4 0x0080
__le16 table_id;
__le32 pfpm_proxyfc;
__le32 ip_addr;
u8 mac_addr[6];
};
/* Set NS Proxy Table Entry Command (indirect 0x0105) */
struct i40e_aqc_ns_proxy_data {
__le16 table_idx_mac_addr_0;
__le16 table_idx_mac_addr_1;
__le16 table_idx_ipv6_0;
__le16 table_idx_ipv6_1;
__le16 control;
#define I40E_AQ_NS_PROXY_ADD_0 0x0100
#define I40E_AQ_NS_PROXY_DEL_0 0x0200
#define I40E_AQ_NS_PROXY_ADD_1 0x0400
#define I40E_AQ_NS_PROXY_DEL_1 0x0800
#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000
#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000
#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000
#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000
#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001
#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002
#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004
u8 mac_addr_0[6];
u8 mac_addr_1[6];
u8 local_mac_addr[6];
u8 ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */
u8 ipv6_addr_1[16];
};
/* Manage LAA Command (0x0106) - obsolete */
struct i40e_aqc_mng_laa {
__le16 command_flags;
#define I40E_AQ_LAA_FLAG_WR 0x8000
u8 reserved[2];
__le32 sal;
__le16 sah;
u8 reserved2[6];
};
/* Manage MAC Address Read Command (indirect 0x0107) */
struct i40e_aqc_mac_address_read {
__le16 command_flags;
#define I40E_AQC_LAN_ADDR_VALID 0x10
#define I40E_AQC_SAN_ADDR_VALID 0x20
#define I40E_AQC_PORT_ADDR_VALID 0x40
#define I40E_AQC_WOL_ADDR_VALID 0x80
#define I40E_AQC_ADDR_VALID_MASK 0xf0
u8 reserved[6];
__le32 addr_high;
__le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read);
struct i40e_aqc_mac_address_read_data {
u8 pf_lan_mac[6];
u8 pf_san_mac[6];
u8 port_mac[6];
u8 pf_wol_mac[6];
};
I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data);
/* Manage MAC Address Write Command (0x0108) */
struct i40e_aqc_mac_address_write {
__le16 command_flags;
#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000
#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000
#define I40E_AQC_WRITE_TYPE_PORT 0x8000
#define I40E_AQC_WRITE_TYPE_MASK 0xc000
__le16 mac_sah;
__le32 mac_sal;
u8 reserved[8];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write);
/* PXE commands (0x011x) */
/* Clear PXE Command and response (direct 0x0110) */
struct i40e_aqc_clear_pxe {
u8 rx_cnt;
u8 reserved[15];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
/* Switch configuration commands (0x02xx) */
/* Used by many indirect commands that only pass an SEID and a buffer in the
* command
*/
struct i40e_aqc_switch_seid {
__le16 seid;
u8 reserved[6];
__le32 addr_high;
__le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid);
/* Get Switch Configuration command (indirect 0x0200)
* uses i40e_aqc_switch_seid for the descriptor
*/
struct i40e_aqc_get_switch_config_header_resp {
__le16 num_reported;
__le16 num_total;
u8 reserved[12];
};
struct i40e_aqc_switch_config_element_resp {
u8 element_type;
#define I40E_AQ_SW_ELEM_TYPE_MAC 1
#define I40E_AQ_SW_ELEM_TYPE_PF 2
#define I40E_AQ_SW_ELEM_TYPE_VF 3
#define I40E_AQ_SW_ELEM_TYPE_EMP 4
#define I40E_AQ_SW_ELEM_TYPE_BMC 5
#define I40E_AQ_SW_ELEM_TYPE_PV 16
#define I40E_AQ_SW_ELEM_TYPE_VEB 17
#define I40E_AQ_SW_ELEM_TYPE_PA 18
#define I40E_AQ_SW_ELEM_TYPE_VSI 19
u8 revision;
#define I40E_AQ_SW_ELEM_REV_1 1
__le16 seid;
__le16 uplink_seid;
__le16 downlink_seid;
u8 reserved[3];
u8 connection_type;
#define I40E_AQ_CONN_TYPE_REGULAR 0x1
#define I40E_AQ_CONN_TYPE_DEFAULT 0x2
#define I40E_AQ_CONN_TYPE_CASCADED 0x3
__le16 scheduler_id;
__le16 element_info;
};
/* Get Switch Configuration (indirect 0x0200)
* an array of elements is returned in the response buffer
* the first in the array is the header, remainder are elements
*/
struct i40e_aqc_get_switch_config_resp {
struct i40e_aqc_get_switch_config_header_resp header;
struct i40e_aqc_switch_config_element_resp element[1];
};
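/* Minimal sketch of walking the response (the helper name is hypothetical):
 * the buffer starts with the header and is followed by num_reported elements,
 * so callers index element[] like an ordinary array.
 */
static inline u16
i40e_example_count_vsis(struct i40e_aqc_get_switch_config_resp *resp)
{
	u16 reported = le16_to_cpu(resp->header.num_reported);
	u16 count = 0;
	u16 i;

	for (i = 0; i < reported; i++)
		if (resp->element[i].element_type == I40E_AQ_SW_ELEM_TYPE_VSI)
			count++;
	return count;
}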
/* Add Statistics (direct 0x0201)
* Remove Statistics (direct 0x0202)
*/
struct i40e_aqc_add_remove_statistics {
__le16 seid;
__le16 vlan;
__le16 stat_index;
u8 reserved[10];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics);
/* Set Port Parameters command (direct 0x0203) */
struct i40e_aqc_set_port_parameters {
__le16 command_flags;
#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1
#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */
#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4
__le16 bad_frame_vsi;
__le16 default_seid; /* reserved for command */
u8 reserved[10];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters);
/* Get Switch Resource Allocation (indirect 0x0204) */
struct i40e_aqc_get_switch_resource_alloc {
u8 num_entries; /* reserved for command */
u8 reserved[7];
__le32 addr_high;
__le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc);
/* expect an array of these structs in the response buffer */
struct i40e_aqc_switch_resource_alloc_element_resp {
u8 resource_type;
#define I40E_AQ_RESOURCE_TYPE_VEB 0x0
#define I40E_AQ_RESOURCE_TYPE_VSI 0x1
#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2
#define I40E_AQ_RESOURCE_TYPE_STAG 0x3
#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4
#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5
#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6
#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7
#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8
#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9
#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA
#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB
#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC
#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD
#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF
#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10
#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11
#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12
#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13
u8 reserved1;
__le16 guaranteed;
__le16 total;
__le16 used;
__le16 total_unalloced;
u8 reserved2[6];
};
/* Add VSI (indirect 0x0210)
* this indirect command uses struct i40e_aqc_vsi_properties_data
* as the indirect buffer (128 bytes)
*
* Update VSI (indirect 0x211)
* uses the same data structure as Add VSI
*
* Get VSI (indirect 0x0212)
* uses the same completion and data structure as Add VSI
*/
struct i40e_aqc_add_get_update_vsi {
__le16 uplink_seid;
u8 connection_type;
#define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1
#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2
#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3
u8 reserved1;
u8 vf_id;
u8 reserved2;
__le16 vsi_flags;
#define I40E_AQ_VSI_TYPE_SHIFT 0x0
#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT)
#define I40E_AQ_VSI_TYPE_VF 0x0
#define I40E_AQ_VSI_TYPE_VMDQ2 0x1
#define I40E_AQ_VSI_TYPE_PF 0x2
#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3
#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4
#define I40E_AQ_VSI_FLAG_CLOUD_VSI 0x8
__le32 addr_high;
__le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi);
struct i40e_aqc_add_get_update_vsi_completion {
__le16 seid;
__le16 vsi_number;
__le16 vsi_used;
__le16 vsi_free;
__le32 addr_high;
__le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi_completion);
struct i40e_aqc_vsi_properties_data {
/* first 96 bytes are written by SW */
__le16 valid_sections;
#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001
#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002
#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004
#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008
#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010
#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020
#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040
#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080
#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100
#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200
/* switch section */
__le16 switch_id; /* 12bit id combined with flags below */
#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000
#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT)
#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000
#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000
#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000
u8 sw_reserved[2];
/* security section */
u8 sec_flags;
#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01
#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02
#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04
u8 sec_reserved;
/* VLAN section */
__le16 pvid; /* VLANS include priority bits */
__le16 fcoe_pvid;
u8 port_vlan_flags;
#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00
#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \
I40E_AQ_VSI_PVLAN_MODE_SHIFT)
#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01
#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02
#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03
#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04
#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03
#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \
I40E_AQ_VSI_PVLAN_EMOD_SHIFT)
#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0
#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08
#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10
#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18
u8 pvlan_reserved[3];
/* ingress egress up sections */
__le32 ingress_table; /* bitmap, 3 bits per up */
#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0
#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \
I40E_AQ_VSI_UP_TABLE_UP0_SHIFT)
#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3
#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \
I40E_AQ_VSI_UP_TABLE_UP1_SHIFT)
#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6
#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \
I40E_AQ_VSI_UP_TABLE_UP2_SHIFT)
#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9
#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \
I40E_AQ_VSI_UP_TABLE_UP3_SHIFT)
#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12
#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \
I40E_AQ_VSI_UP_TABLE_UP4_SHIFT)
#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15
#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \
I40E_AQ_VSI_UP_TABLE_UP5_SHIFT)
#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18
#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \
I40E_AQ_VSI_UP_TABLE_UP6_SHIFT)
#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21
#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \
I40E_AQ_VSI_UP_TABLE_UP7_SHIFT)
__le32 egress_table; /* same defines as for ingress table */
/* cascaded PV section */
__le16 cas_pv_tag;
u8 cas_pv_flags;
#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00
#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \
I40E_AQ_VSI_CAS_PV_TAGX_SHIFT)
#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00
#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01
#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02
#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10
#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20
#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40
u8 cas_pv_reserved;
/* queue mapping section */
__le16 mapping_flags;
#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0
#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1
__le16 queue_mapping[16];
#define I40E_AQ_VSI_QUEUE_SHIFT 0x0
#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT)
__le16 tc_mapping[8];
#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0
#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \
I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9
#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \
I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
/* queueing option section */
u8 queueing_opt_flags;
#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10
#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
u8 queueing_opt_reserved[3];
/* scheduler section */
u8 up_enable_bits;
u8 sched_reserved;
/* outer up section */
__le32 outer_up_table; /* same structure and defines as ingress table */
u8 cmd_reserved[8];
/* last 32 bytes are written by FW */
__le16 qs_handle[8];
#define I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF
__le16 stat_counter_idx;
__le16 sched_id;
u8 resp_reserved[12];
};
I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data);
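/* Minimal sketch of unpacking one tc_mapping entry (the helper name is
 * hypothetical, and the power-of-two reading of the queue-number field is an
 * assumption): the low bits give the first queue, and the 3-bit field above
 * them encodes the queue count as an exponent.
 */
static inline void i40e_example_decode_tc_map(__le16 tc_map, u16 *offset,
					      u16 *num_queues)
{
	u16 v = le16_to_cpu(tc_map);

	*offset = (v & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
		  I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
	*num_queues = 1 << ((v & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
			    I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
}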
/* Add Port Virtualizer (direct 0x0220)
* also used for update PV (direct 0x0221) but only flags are used
* (IS_CTRL_PORT only works on add PV)
*/
struct i40e_aqc_add_update_pv {
__le16 command_flags;
#define I40E_AQC_PV_FLAG_PV_TYPE 0x1
#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2
#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4
#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8
__le16 uplink_seid;
__le16 connected_seid;
u8 reserved[10];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv);
struct i40e_aqc_add_update_pv_completion {
/* reserved for update; for add also encodes error if rc == ENOSPC */
__le16 pv_seid;
#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1
#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2
#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4
#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8
u8 reserved[14];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion);
/* Get PV Params (direct 0x0222)
* uses i40e_aqc_switch_seid for the descriptor
*/
struct i40e_aqc_get_pv_params_completion {
__le16 seid;
__le16 default_stag;
__le16 pv_flags; /* same flags as add_pv */
#define I40E_AQC_GET_PV_PV_TYPE 0x1
#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2
#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4
u8 reserved[8];
__le16 default_port_seid;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion);
/* Add VEB (direct 0x0230) */
struct i40e_aqc_add_veb {
__le16 uplink_seid;
__le16 downlink_seid;
__le16 veb_flags;
#define I40E_AQC_ADD_VEB_FLOATING 0x1
#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1
#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \
I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT)
#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2
#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4
#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8
u8 enable_tcs;
u8 reserved[9];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb);
struct i40e_aqc_add_veb_completion {
u8 reserved[6];
__le16 switch_seid;
/* also encodes error if rc == ENOSPC; codes are the same as add_pv */
__le16 veb_seid;
#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1
#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2
#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4
#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8
__le16 statistic_index;
__le16 vebs_used;
__le16 vebs_free;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion);
/* Get VEB Parameters (direct 0x0232)
* uses i40e_aqc_switch_seid for the descriptor
*/
struct i40e_aqc_get_veb_parameters_completion {
__le16 seid;
__le16 switch_id;
__le16 veb_flags; /* only the first/last flags from 0x0230 are valid */
__le16 statistic_index;
__le16 vebs_used;
__le16 vebs_free;
u8 reserved[4];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion);
/* Delete Element (direct 0x0243)
* uses the generic i40e_aqc_switch_seid
*/
/* Add MAC-VLAN (indirect 0x0250) */
/* used for the command for most vlan commands */
struct i40e_aqc_macvlan {
__le16 num_addresses;
__le16 seid[3];
#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0
#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \
I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
#define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000
__le32 addr_high;
__le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan);
/* indirect data for command and response */
struct i40e_aqc_add_macvlan_element_data {
u8 mac_addr[6];
__le16 vlan_tag;
__le16 flags;
#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001
#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002
#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004
#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008
__le16 queue_number;
#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0
#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \
I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
/* response section */
u8 match_method;
#define I40E_AQC_MM_PERFECT_MATCH 0x01
#define I40E_AQC_MM_HASH_MATCH 0x02
#define I40E_AQC_MM_ERR_NO_RES 0xFF
u8 reserved1[3];
};
struct i40e_aqc_add_remove_macvlan_completion {
__le16 perfect_mac_used;
__le16 perfect_mac_free;
__le16 unicast_hash_free;
__le16 multicast_hash_free;
__le32 addr_high;
__le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_macvlan_completion);
/* Remove MAC-VLAN (indirect 0x0251)
* uses i40e_aqc_macvlan for the descriptor
* data points to an array of num_addresses of elements
*/
struct i40e_aqc_remove_macvlan_element_data {
u8 mac_addr[6];
__le16 vlan_tag;
u8 flags;
#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01
#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02
#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08
#define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10
u8 reserved[3];
/* reply section */
u8 error_code;
#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0
#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF
u8 reply_reserved[3];
};
/* Add VLAN (indirect 0x0252)
* Remove VLAN (indirect 0x0253)
* use the generic i40e_aqc_macvlan for the command
*/
struct i40e_aqc_add_remove_vlan_element_data {
__le16 vlan_tag;
u8 vlan_flags;
/* flags for add VLAN */
#define I40E_AQC_ADD_VLAN_LOCAL 0x1
#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1
#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << \
I40E_AQC_ADD_PVLAN_TYPE_SHIFT)
#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0
#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2
#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4
#define I40E_AQC_VLAN_PTYPE_SHIFT 3
#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT)
#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0
#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8
#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10
#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18
/* flags for remove VLAN */
#define I40E_AQC_REMOVE_VLAN_ALL 0x1
u8 reserved;
u8 result;
/* flags for add VLAN */
#define I40E_AQC_ADD_VLAN_SUCCESS 0x0
#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE
#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF
/* flags for remove VLAN */
#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0
#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF
u8 reserved1[3];
};
struct i40e_aqc_add_remove_vlan_completion {
u8 reserved[4];
__le16 vlans_used;
__le16 vlans_free;
__le32 addr_high;
__le32 addr_low;
};
/* Set VSI Promiscuous Modes (direct 0x0254) */
struct i40e_aqc_set_vsi_promiscuous_modes {
__le16 promiscuous_flags;
__le16 valid_flags;
/* flags used for both fields above */
#define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01
#define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02
#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04
#define I40E_AQC_SET_VSI_DEFAULT 0x08
#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
__le16 seid;
#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
u8 reserved[10];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes);
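/* Illustrative sketch, not part of this patch: request unicast promiscuous
 * mode on one VSI.  Only the bits set in valid_flags are acted upon, so the
 * remaining promiscuous modes are left untouched.
 */
static inline void i40e_example_set_uc_promisc(
			struct i40e_aqc_set_vsi_promiscuous_modes *cmd,
			u16 vsi_seid)
{
	cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
	cmd->seid = cpu_to_le16(vsi_seid & I40E_AQC_VSI_PROM_CMD_SEID_MASK);
}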
/* Add S/E-tag command (direct 0x0255)
* Uses generic i40e_aqc_add_remove_tag_completion for completion
*/
struct i40e_aqc_add_tag {
__le16 flags;
#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001
__le16 seid;
#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0
#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \
I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT)
__le16 tag;
__le16 queue_number;
u8 reserved[8];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag);
struct i40e_aqc_add_remove_tag_completion {
u8 reserved[12];
__le16 tags_used;
__le16 tags_free;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion);
/* Remove S/E-tag command (direct 0x0256)
* Uses generic i40e_aqc_add_remove_tag_completion for completion
*/
struct i40e_aqc_remove_tag {
__le16 seid;
#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0
#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT)
__le16 tag;
u8 reserved[12];
};
/* Add multicast E-Tag (direct 0x0257)
* del multicast E-Tag (direct 0x0258) only uses pv_seid and etag fields
* and no external data
*/
struct i40e_aqc_add_remove_mcast_etag {
__le16 pv_seid;
__le16 etag;
u8 num_unicast_etags;
u8 reserved[3];
__le32 addr_high; /* address of array of 2-byte s-tags */
__le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag);
struct i40e_aqc_add_remove_mcast_etag_completion {
u8 reserved[4];
__le16 mcast_etags_used;
__le16 mcast_etags_free;
__le32 addr_high;
__le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion);
/* Update S/E-Tag (direct 0x0259) */
struct i40e_aqc_update_tag {
__le16 seid;
#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0
#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT)
__le16 old_tag;
__le16 new_tag;
u8 reserved[10];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag);
struct i40e_aqc_update_tag_completion {
u8 reserved[12];
__le16 tags_used;
__le16 tags_free;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion);
/* Add Control Packet filter (direct 0x025A)
* Remove Control Packet filter (direct 0x025B)
* uses the i40e_aqc_add_oveb_cloud,
* and the generic direct completion structure
*/
struct i40e_aqc_add_remove_control_packet_filter {
u8 mac[6];
__le16 etype;
__le16 flags;
#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001
#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002
#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004
#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008
#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000
__le16 seid;
#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0
#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \
I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT)
__le16 queue;
u8 reserved[2];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter);
struct i40e_aqc_add_remove_control_packet_filter_completion {
__le16 mac_etype_used;
__le16 etype_used;
__le16 mac_etype_free;
__le16 etype_free;
u8 reserved[8];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion);
/* Add Cloud filters (indirect 0x025C)
* Remove Cloud filters (indirect 0x025D)
* uses the i40e_aqc_add_remove_cloud_filters,
* and the generic indirect completion structure
*/
struct i40e_aqc_add_remove_cloud_filters {
u8 num_filters;
u8 reserved;
__le16 seid;
#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0
#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \
I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT)
u8 reserved2[4];
__le32 addr_high;
__le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters);
struct i40e_aqc_add_remove_cloud_filters_element_data {
u8 outer_mac[6];
u8 inner_mac[6];
__le16 inner_vlan;
union {
struct {
u8 reserved[12];
u8 data[4];
} v4;
struct {
u8 data[16];
} v6;
} ipaddr;
__le16 flags;
#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0
#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
/* 0x0000 reserved */
#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001
/* 0x0002 reserved */
#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003
#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004
/* 0x0005 reserved */
#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006
/* 0x0007 reserved */
/* 0x0008 reserved */
#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009
#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A
#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B
#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C
#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080
#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6
#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0
#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0
#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100
#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9
#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00
#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0
#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1
#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2
#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3
__le32 tenant_id;
u8 reserved[4];
__le16 queue_number;
#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0
#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \
I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
u8 reserved2[14];
/* response section */
u8 allocation_result;
#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0
#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF
u8 response_reserved[7];
};
struct i40e_aqc_remove_cloud_filters_completion {
__le16 perfect_ovlan_used;
__le16 perfect_ovlan_free;
__le16 vlan_used;
__le16 vlan_free;
__le32 addr_high;
__le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion);
/* Add Mirror Rule (indirect or direct 0x0260)
* Delete Mirror Rule (indirect or direct 0x0261)
* note: some rule types (4,5) do not use an external buffer.
* take care to set the flags correctly.
*/
struct i40e_aqc_add_delete_mirror_rule {
__le16 seid;
__le16 rule_type;
#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0
#define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \
I40E_AQC_MIRROR_RULE_TYPE_SHIFT)
#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1
#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2
#define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3
#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4
#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5
__le16 num_entries;
__le16 destination; /* VSI for add, rule id for delete */
__le32 addr_high; /* address of array of 2-byte VSI or VLAN ids */
__le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule);
struct i40e_aqc_add_delete_mirror_rule_completion {
u8 reserved[2];
__le16 rule_id; /* only used on add */
__le16 mirror_rules_used;
__le16 mirror_rules_free;
__le32 addr_high;
__le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
/* Set Storm Control Configuration (direct 0x0280)
* Get Storm Control Configuration (direct 0x0281)
* the command and response use the same descriptor structure
*/
struct i40e_aqc_set_get_storm_control_config {
__le32 broadcast_threshold;
__le32 multicast_threshold;
__le32 control_flags;
#define I40E_AQC_STORM_CONTROL_MDIPW 0x01
#define I40E_AQC_STORM_CONTROL_MDICW 0x02
#define I40E_AQC_STORM_CONTROL_BDIPW 0x04
#define I40E_AQC_STORM_CONTROL_BDICW 0x08
#define I40E_AQC_STORM_CONTROL_BIDU 0x10
#define I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT 8
#define I40E_AQC_STORM_CONTROL_INTERVAL_MASK (0x3FF << \
I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT)
u8 reserved[4];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_get_storm_control_config);
/* DCB 0x03xx */
/* PFC Ignore (direct 0x0301)
* the command and response use the same descriptor structure
*/
struct i40e_aqc_pfc_ignore {
u8 tc_bitmap;
u8 command_flags; /* unused on response */
#define I40E_AQC_PFC_IGNORE_SET 0x80
#define I40E_AQC_PFC_IGNORE_CLEAR 0x0
u8 reserved[14];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore);
/* DCB Update (direct 0x0302) uses the i40e_aq_desc structure
* with no parameters
*/
/* TX scheduler 0x04xx */
/* Almost all the indirect commands use
* this generic struct to pass the SEID in param0
*/
struct i40e_aqc_tx_sched_ind {
__le16 vsi_seid;
u8 reserved[6];
__le32 addr_high;
__le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind);
/* Several commands respond with a set of queue set handles */
struct i40e_aqc_qs_handles_resp {
__le16 qs_handles[8];
};
/* Configure VSI BW limits (direct 0x0400) */
struct i40e_aqc_configure_vsi_bw_limit {
__le16 vsi_seid;
u8 reserved[2];
__le16 credit;
u8 reserved1[2];
u8 max_credit; /* 0-3, limit = 2^max */
u8 reserved2[7];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit);
/* Configure VSI Bandwidth Limit per Traffic Type (indirect 0x0406)
* responds with i40e_aqc_qs_handles_resp
*/
struct i40e_aqc_configure_vsi_ets_sla_bw_data {
u8 tc_valid_bits;
u8 reserved[15];
__le16 tc_bw_credits[8]; /* FW writes back QS handles here */
/* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
__le16 tc_bw_max[2];
u8 reserved1[28];
};
/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407)
* responds with i40e_aqc_qs_handles_resp
*/
struct i40e_aqc_configure_vsi_tc_bw_data {
u8 tc_valid_bits;
u8 reserved[3];
u8 tc_bw_credits[8];
u8 reserved1[4];
__le16 qs_handles[8];
};
/* Query vsi bw configuration (indirect 0x0408) */
struct i40e_aqc_query_vsi_bw_config_resp {
u8 tc_valid_bits;
u8 tc_suspended_bits;
u8 reserved[14];
__le16 qs_handles[8];
u8 reserved1[4];
__le16 port_bw_limit;
u8 reserved2[2];
u8 max_bw; /* 0-3, limit = 2^max */
u8 reserved3[23];
};
/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */
struct i40e_aqc_query_vsi_ets_sla_config_resp {
u8 tc_valid_bits;
u8 reserved[3];
u8 share_credits[8];
__le16 credits[8];
/* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
__le16 tc_bw_max[2];
};
/* Configure Switching Component Bandwidth Limit (direct 0x0410) */
struct i40e_aqc_configure_switching_comp_bw_limit {
__le16 seid;
u8 reserved[2];
__le16 credit;
u8 reserved1[2];
u8 max_bw; /* 0-3, limit = 2^max */
u8 reserved2[7];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
/* Enable Physical Port ETS (indirect 0x0413)
* Modify Physical Port ETS (indirect 0x0414)
* Disable Physical Port ETS (indirect 0x0415)
*/
struct i40e_aqc_configure_switching_comp_ets_data {
u8 reserved[4];
u8 tc_valid_bits;
u8 reserved1;
u8 tc_strict_priority_flags;
u8 reserved2[17];
u8 tc_bw_share_credits[8];
u8 reserved3[96];
};
/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
u8 tc_valid_bits;
u8 reserved[15];
__le16 tc_bw_credit[8];
/* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
__le16 tc_bw_max[2];
u8 reserved1[28];
};
/* Configure Switching Component Bandwidth Allocation per Tc
* (indirect 0x0417)
*/
struct i40e_aqc_configure_switching_comp_bw_config_data {
u8 tc_valid_bits;
u8 reserved[2];
u8 absolute_credits; /* bool */
u8 tc_bw_share_credits[8];
u8 reserved1[20];
};
/* Query Switching Component Configuration (indirect 0x0418) */
struct i40e_aqc_query_switching_comp_ets_config_resp {
u8 tc_valid_bits;
u8 reserved[35];
__le16 port_bw_limit;
u8 reserved1[2];
u8 tc_bw_max; /* 0-3, limit = 2^max */
u8 reserved2[23];
};
/* Query PhysicalPort ETS Configuration (indirect 0x0419) */
struct i40e_aqc_query_port_ets_config_resp {
u8 reserved[4];
u8 tc_valid_bits;
u8 reserved1;
u8 tc_strict_priority_bits;
u8 reserved2;
u8 tc_bw_share_credits[8];
__le16 tc_bw_limits[8];
/* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */
__le16 tc_bw_max[2];
u8 reserved3[32];
};
/* Query Switching Component Bandwidth Allocation per Traffic Type
* (indirect 0x041A)
*/
struct i40e_aqc_query_switching_comp_bw_config_resp {
u8 tc_valid_bits;
u8 reserved[2];
u8 absolute_credits_enable; /* bool */
u8 tc_bw_share_credits[8];
__le16 tc_bw_limits[8];
/* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
__le16 tc_bw_max[2];
};
/* Suspend/resume port TX traffic
* (direct 0x041B and 0x041C) uses the generic SEID struct
*/
/* Get and set the active HMC resource profile and status.
* (direct 0x0500) and (direct 0x0501)
*/
struct i40e_aq_get_set_hmc_resource_profile {
u8 pm_profile;
u8 pe_vf_enabled;
u8 reserved[14];
};
I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
enum i40e_aq_hmc_profile {
/* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */
I40E_HMC_PROFILE_DEFAULT = 1,
I40E_HMC_PROFILE_FAVOR_VF = 2,
I40E_HMC_PROFILE_EQUAL = 3,
};
#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF
#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F
/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
/* set in param0 for get phy abilities to report qualified modules */
#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001
#define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002
enum i40e_aq_phy_type {
I40E_PHY_TYPE_SGMII = 0x0,
I40E_PHY_TYPE_1000BASE_KX = 0x1,
I40E_PHY_TYPE_10GBASE_KX4 = 0x2,
I40E_PHY_TYPE_10GBASE_KR = 0x3,
I40E_PHY_TYPE_40GBASE_KR4 = 0x4,
I40E_PHY_TYPE_XAUI = 0x5,
I40E_PHY_TYPE_XFI = 0x6,
I40E_PHY_TYPE_SFI = 0x7,
I40E_PHY_TYPE_XLAUI = 0x8,
I40E_PHY_TYPE_XLPPI = 0x9,
I40E_PHY_TYPE_40GBASE_CR4_CU = 0xA,
I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB,
I40E_PHY_TYPE_100BASE_TX = 0x11,
I40E_PHY_TYPE_1000BASE_T = 0x12,
I40E_PHY_TYPE_10GBASE_T = 0x13,
I40E_PHY_TYPE_10GBASE_SR = 0x14,
I40E_PHY_TYPE_10GBASE_LR = 0x15,
I40E_PHY_TYPE_10GBASE_SFPP_CU = 0x16,
I40E_PHY_TYPE_10GBASE_CR1 = 0x17,
I40E_PHY_TYPE_40GBASE_CR4 = 0x18,
I40E_PHY_TYPE_40GBASE_SR4 = 0x19,
I40E_PHY_TYPE_40GBASE_LR4 = 0x1A,
I40E_PHY_TYPE_20GBASE_KR2 = 0x1B,
I40E_PHY_TYPE_MAX
};
#define I40E_LINK_SPEED_100MB_SHIFT 0x1
#define I40E_LINK_SPEED_1000MB_SHIFT 0x2
#define I40E_LINK_SPEED_10GB_SHIFT 0x3
#define I40E_LINK_SPEED_40GB_SHIFT 0x4
#define I40E_LINK_SPEED_20GB_SHIFT 0x5
enum i40e_aq_link_speed {
I40E_LINK_SPEED_UNKNOWN = 0,
I40E_LINK_SPEED_100MB = (1 << I40E_LINK_SPEED_100MB_SHIFT),
I40E_LINK_SPEED_1GB = (1 << I40E_LINK_SPEED_1000MB_SHIFT),
I40E_LINK_SPEED_10GB = (1 << I40E_LINK_SPEED_10GB_SHIFT),
I40E_LINK_SPEED_40GB = (1 << I40E_LINK_SPEED_40GB_SHIFT),
I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT)
};
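/* Illustrative helper, not part of this patch: map a single bit from the
 * link_speed bitmap above to a speed in Mb/s, e.g. for log messages.
 */
static inline u32 i40e_example_link_speed_mbps(enum i40e_aq_link_speed speed)
{
	switch (speed) {
	case I40E_LINK_SPEED_100MB:
		return 100;
	case I40E_LINK_SPEED_1GB:
		return 1000;
	case I40E_LINK_SPEED_10GB:
		return 10000;
	case I40E_LINK_SPEED_20GB:
		return 20000;
	case I40E_LINK_SPEED_40GB:
		return 40000;
	default:
		return 0;
	}
}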
struct i40e_aqc_module_desc {
u8 oui[3];
u8 reserved1;
u8 part_number[16];
u8 revision[4];
u8 reserved2[8];
};
struct i40e_aq_get_phy_abilities_resp {
__le32 phy_type; /* bitmap using the above enum for offsets */
u8 link_speed; /* bitmap using the above enum bit patterns */
u8 abilities;
#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01
#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02
#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04
#define I40E_AQ_PHY_FLAG_AN_SHIFT 3
#define I40E_AQ_PHY_FLAG_AN_MASK (0x3 << I40E_AQ_PHY_FLAG_AN_SHIFT)
#define I40E_AQ_PHY_FLAG_AN_OFF 0x00 /* link forced on */
#define I40E_AQ_PHY_FLAG_AN_OFF_LINK_DOWN 0x01
#define I40E_AQ_PHY_FLAG_AN_ON 0x02
#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
__le16 eee_capability;
#define I40E_AQ_EEE_100BASE_TX 0x0002
#define I40E_AQ_EEE_1000BASE_T 0x0004
#define I40E_AQ_EEE_10GBASE_T 0x0008
#define I40E_AQ_EEE_1000BASE_KX 0x0010
#define I40E_AQ_EEE_10GBASE_KX4 0x0020
#define I40E_AQ_EEE_10GBASE_KR 0x0040
__le32 eeer_val;
u8 d3_lpan;
#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01
u8 reserved[3];
u8 phy_id[4];
u8 module_type[3];
u8 qualified_module_count;
#define I40E_AQ_PHY_MAX_QMS 16
struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS];
};
/* Set PHY Config (direct 0x0601) */
struct i40e_aq_set_phy_config { /* same bits as above in all */
__le32 phy_type;
u8 link_speed;
u8 abilities;
/* bits 0-2 use the values from get_phy_abilities_resp */
#define I40E_AQ_PHY_ENABLE_LINK 0x08
#define I40E_AQ_PHY_ENABLE_AN 0x10
#define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20
__le16 eee_capability;
__le32 eeer;
u8 low_power_ctrl;
u8 reserved[3];
};
I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
/* Set MAC Config command data structure (direct 0x0603) */
struct i40e_aq_set_mac_config {
__le16 max_frame_size;
u8 params;
#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04
#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78
#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3
#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0
#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF
#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9
#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8
#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7
#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6
#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5
#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4
#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3
#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2
#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1
u8 tx_timer_priority; /* bitmap */
__le16 tx_timer_value;
__le16 fc_refresh_threshold;
u8 reserved[8];
};
I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config);
/* Restart Auto-Negotiation (direct 0x605) */
struct i40e_aqc_set_link_restart_an {
u8 command;
#define I40E_AQ_PHY_RESTART_AN 0x02
#define I40E_AQ_PHY_LINK_ENABLE 0x04
u8 reserved[15];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an);
/* Get Link Status cmd & response data structure (direct 0x0607) */
struct i40e_aqc_get_link_status {
__le16 command_flags; /* only field set on command */
#define I40E_AQ_LSE_MASK 0x3
#define I40E_AQ_LSE_NOP 0x0
#define I40E_AQ_LSE_DISABLE 0x2
#define I40E_AQ_LSE_ENABLE 0x3
/* only response uses this flag */
#define I40E_AQ_LSE_IS_ENABLED 0x1
u8 phy_type; /* i40e_aq_phy_type */
u8 link_speed; /* i40e_aq_link_speed */
u8 link_info;
#define I40E_AQ_LINK_UP 0x01
#define I40E_AQ_LINK_FAULT 0x02
#define I40E_AQ_LINK_FAULT_TX 0x04
#define I40E_AQ_LINK_FAULT_RX 0x08
#define I40E_AQ_LINK_FAULT_REMOTE 0x10
#define I40E_AQ_MEDIA_AVAILABLE 0x40
#define I40E_AQ_SIGNAL_DETECT 0x80
u8 an_info;
#define I40E_AQ_AN_COMPLETED 0x01
#define I40E_AQ_LP_AN_ABILITY 0x02
#define I40E_AQ_PD_FAULT 0x04
#define I40E_AQ_FEC_EN 0x08
#define I40E_AQ_PHY_LOW_POWER 0x10
#define I40E_AQ_LINK_PAUSE_TX 0x20
#define I40E_AQ_LINK_PAUSE_RX 0x40
#define I40E_AQ_QUALIFIED_MODULE 0x80
u8 ext_info;
#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01
#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02
#define I40E_AQ_LINK_TX_SHIFT 0x02
#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT)
#define I40E_AQ_LINK_TX_ACTIVE 0x00
#define I40E_AQ_LINK_TX_DRAINED 0x01
#define I40E_AQ_LINK_TX_FLUSHED 0x03
u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
__le16 max_frame_size;
u8 config;
#define I40E_AQ_CONFIG_CRC_ENA 0x04
#define I40E_AQ_CONFIG_PACING_MASK 0x78
u8 reserved[5];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status);
/* Set event mask command (direct 0x613) */
struct i40e_aqc_set_phy_int_mask {
u8 reserved[8];
__le16 event_mask;
#define I40E_AQ_EVENT_LINK_UPDOWN 0x0002
#define I40E_AQ_EVENT_MEDIA_NA 0x0004
#define I40E_AQ_EVENT_LINK_FAULT 0x0008
#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010
#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020
#define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040
#define I40E_AQ_EVENT_AN_COMPLETED 0x0080
#define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100
#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200
u8 reserved1[6];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask);
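/* Illustrative sketch, not part of this patch: unmask only link up/down
 * and module qualification failure events.
 */
static inline void i40e_example_fill_phy_int_mask(
				struct i40e_aqc_set_phy_int_mask *cmd)
{
	cmd->event_mask = cpu_to_le16(I40E_AQ_EVENT_LINK_UPDOWN |
				      I40E_AQ_EVENT_MODULE_QUAL_FAIL);
}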
/* Get Local AN advt register (direct 0x0614)
* Set Local AN advt register (direct 0x0615)
* Get Link Partner AN advt register (direct 0x0616)
*/
struct i40e_aqc_an_advt_reg {
__le32 local_an_reg0;
__le16 local_an_reg1;
u8 reserved[10];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg);
/* Set Loopback mode (0x0618) */
struct i40e_aqc_set_lb_mode {
__le16 lb_mode;
#define I40E_AQ_LB_PHY_LOCAL 0x01
#define I40E_AQ_LB_PHY_REMOTE 0x02
#define I40E_AQ_LB_MAC_LOCAL 0x04
u8 reserved[14];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
/* Set PHY Reset command (0x0622) */
struct i40e_aqc_set_phy_reset {
u8 reset_flags;
#define I40E_AQ_PHY_RESET_REQUEST 0x02
u8 reserved[15];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_reset);
enum i40e_aq_phy_reg_type {
I40E_AQC_PHY_REG_INTERNAL = 0x1,
I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2,
I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3
};
/* NVM Read command (indirect 0x0701)
* NVM Erase commands (direct 0x0702)
* NVM Update commands (indirect 0x0703)
*/
struct i40e_aqc_nvm_update {
u8 command_flags;
#define I40E_AQ_NVM_LAST_CMD 0x01
#define I40E_AQ_NVM_FLASH_ONLY 0x80
u8 module_pointer;
__le16 length;
__le32 offset;
__le32 addr_high;
__le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
/* Send to PF command (indirect 0x0801) id is only used by PF
* Send to VF command (indirect 0x0802) id is only used by PF
* Send to Peer PF command (indirect 0x0803)
*/
struct i40e_aqc_pf_vf_message {
__le32 id;
u8 reserved[4];
__le32 addr_high;
__le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message);
/* Alternate structure */
/* Direct write (direct 0x0900)
* Direct read (direct 0x0902)
*/
struct i40e_aqc_alternate_write {
__le32 address0;
__le32 data0;
__le32 address1;
__le32 data1;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write);
/* Indirect write (indirect 0x0901)
* Indirect read (indirect 0x0903)
*/
struct i40e_aqc_alternate_ind_write {
__le32 address;
__le32 length;
__le32 addr_high;
__le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write);
/* Done alternate write (direct 0x0904)
* uses i40e_aq_desc
*/
struct i40e_aqc_alternate_write_done {
__le16 cmd_flags;
#define I40E_AQ_ALTERNATE_MODE_BIOS_MASK 1
#define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY 0
#define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI 1
#define I40E_AQ_ALTERNATE_RESET_NEEDED 2
u8 reserved[14];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done);
/* Set OEM mode (direct 0x0905) */
struct i40e_aqc_alternate_set_mode {
__le32 mode;
#define I40E_AQ_ALTERNATE_MODE_NONE 0
#define I40E_AQ_ALTERNATE_MODE_OEM 1
u8 reserved[12];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode);
/* Clear port Alternate RAM (direct 0x0906) uses i40e_aq_desc */
/* async events 0x10xx */
/* Lan Queue Overflow Event (direct, 0x1001) */
struct i40e_aqc_lan_overflow {
__le32 prtdcb_rupto;
__le32 otx_ctl;
u8 reserved[8];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow);
/* Get LLDP MIB (indirect 0x0A00) */
struct i40e_aqc_lldp_get_mib {
u8 type;
u8 reserved1;
#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3
#define I40E_AQ_LLDP_MIB_LOCAL 0x0
#define I40E_AQ_LLDP_MIB_REMOTE 0x1
#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2
#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC
#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2
#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0
#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1
#define I40E_AQ_LLDP_TX_SHIFT 0x4
#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT)
/* TX pause flags use I40E_AQ_LINK_TX_* above */
__le16 local_len;
__le16 remote_len;
u8 reserved2[2];
__le32 addr_high;
__le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib);
/* Configure LLDP MIB Change Event (direct 0x0A01)
* also used for the event (with type in the command field)
*/
struct i40e_aqc_lldp_update_mib {
u8 command;
#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0
#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1
u8 reserved[7];
__le32 addr_high;
__le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib);
/* Add LLDP TLV (indirect 0x0A02)
* Delete LLDP TLV (indirect 0x0A04)
*/
struct i40e_aqc_lldp_add_tlv {
u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
u8 reserved1[1];
__le16 len;
u8 reserved2[4];
__le32 addr_high;
__le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv);
/* Update LLDP TLV (indirect 0x0A03) */
struct i40e_aqc_lldp_update_tlv {
u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
u8 reserved;
__le16 old_len;
__le16 new_offset;
__le16 new_len;
__le32 addr_high;
__le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv);
/* Stop LLDP (direct 0x0A05) */
struct i40e_aqc_lldp_stop {
u8 command;
#define I40E_AQ_LLDP_AGENT_STOP 0x0
#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1
u8 reserved[15];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop);
/* Start LLDP (direct 0x0A06) */
struct i40e_aqc_lldp_start {
u8 command;
#define I40E_AQ_LLDP_AGENT_START 0x1
u8 reserved[15];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
/* Apply MIB changes (0x0A07)
* uses the generic struct as it contains no data
*/
/* Add Udp Tunnel command and completion (direct 0x0B00) */
struct i40e_aqc_add_udp_tunnel {
__le16 udp_port;
u8 header_len; /* in DWords, 1 to 15 */
u8 protocol_type;
#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x0
#define I40E_AQC_TUNNEL_TYPE_VXLAN 0x2
#define I40E_AQC_TUNNEL_TYPE_NGE 0x3
u8 variable_udp_length;
#define I40E_AQC_TUNNEL_FIXED_UDP_LENGTH 0x0
#define I40E_AQC_TUNNEL_VARIABLE_UDP_LENGTH 0x1
u8 udp_key_index;
#define I40E_AQC_TUNNEL_KEY_INDEX_VXLAN 0x0
#define I40E_AQC_TUNNEL_KEY_INDEX_NGE 0x1
#define I40E_AQC_TUNNEL_KEY_INDEX_PROPRIETARY_UDP 0x2
u8 reserved[10];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
struct i40e_aqc_add_udp_tunnel_completion {
__le16 udp_port;
u8 filter_entry_index;
u8 multiple_pfs;
#define I40E_AQC_SINGLE_PF 0x0
#define I40E_AQC_MULTIPLE_PFS 0x1
u8 total_filters;
u8 reserved[11];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion);
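/* Illustrative sketch, not part of this patch: fill the Add UDP Tunnel
 * (0x0B00) command for a VXLAN tunnel on the IANA-assigned port 4789.
 * The header length of 8 DWords is a made-up placeholder; the real value
 * depends on the encapsulation being offloaded.
 */
static inline void i40e_example_fill_add_vxlan(
				struct i40e_aqc_add_udp_tunnel *cmd)
{
	cmd->udp_port = cpu_to_le16(4789);
	cmd->header_len = 8;	/* in DWords, valid range is 1 to 15 */
	cmd->protocol_type = I40E_AQC_TUNNEL_TYPE_VXLAN;
	cmd->variable_udp_length = I40E_AQC_TUNNEL_FIXED_UDP_LENGTH;
	cmd->udp_key_index = I40E_AQC_TUNNEL_KEY_INDEX_VXLAN;
}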
/* remove UDP Tunnel command (0x0B01) */
struct i40e_aqc_remove_udp_tunnel {
u8 reserved[2];
u8 index; /* 0 to 15 */
u8 reserved2[13];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel);
struct i40e_aqc_del_udp_tunnel_completion {
__le16 udp_port;
u8 index; /* 0 to 15 */
u8 multiple_pfs;
u8 total_filters_used;
u8 reserved1[11];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
/* tunnel key structure 0x0B10 */
struct i40e_aqc_tunnel_key_structure {
u8 key1_off;
u8 key2_off;
u8 key1_len; /* 0 to 15 */
u8 key2_len; /* 0 to 15 */
u8 flags;
#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
/* response flags */
#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01
#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02
#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
u8 network_key_index;
#define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0
#define I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1
#define I40E_AQC_NETWORK_KEY_INDEX_FLEX_MAC_IN_UDP 0x2
#define I40E_AQC_NETWORK_KEY_INDEX_GRE 0x3
u8 reserved[10];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure);
/* OEM mode commands (direct 0xFE0x) */
struct i40e_aqc_oem_param_change {
__le32 param_type;
#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0
#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1
#define I40E_AQ_OEM_PARAM_MAC 2
__le32 param_value1;
u8 param_value2[8];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change);
struct i40e_aqc_oem_state_change {
__le32 state;
#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0
#define I40E_AQ_OEM_STATE_LINK_UP 0x1
u8 reserved[12];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change);
/* debug commands */
/* get device id (0xFF00) uses the generic structure */
/* set test mode (0xFF01, internal) */
struct i40e_acq_set_test_mode {
u8 mode;
#define I40E_AQ_TEST_PARTIAL 0
#define I40E_AQ_TEST_FULL 1
#define I40E_AQ_TEST_NVM 2
u8 reserved[3];
u8 command;
#define I40E_AQ_TEST_OPEN 0
#define I40E_AQ_TEST_CLOSE 1
#define I40E_AQ_TEST_INC 2
u8 reserved2[3];
__le32 address_high;
__le32 address_low;
};
I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode);
/* Debug Read Register command (0xFF03)
* Debug Write Register command (0xFF04)
*/
struct i40e_aqc_debug_reg_read_write {
__le32 reserved;
__le32 address;
__le32 value_high;
__le32 value_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_reg_read_write);
/* Scatter/gather Reg Read (indirect 0xFF05)
* Scatter/gather Reg Write (indirect 0xFF06)
*/
/* i40e_aq_desc is used for the command */
struct i40e_aqc_debug_reg_sg_element_data {
__le32 address;
__le32 value;
};
/* Debug Modify register (direct 0xFF07) */
struct i40e_aqc_debug_modify_reg {
__le32 address;
__le32 value;
__le32 clear_mask;
__le32 set_mask;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg);
/* dump internal data (0xFF08, indirect) */
#define I40E_AQ_CLUSTER_ID_AUX 0
#define I40E_AQ_CLUSTER_ID_SWITCH_FLU 1
#define I40E_AQ_CLUSTER_ID_TXSCHED 2
#define I40E_AQ_CLUSTER_ID_HMC 3
#define I40E_AQ_CLUSTER_ID_MAC0 4
#define I40E_AQ_CLUSTER_ID_MAC1 5
#define I40E_AQ_CLUSTER_ID_MAC2 6
#define I40E_AQ_CLUSTER_ID_MAC3 7
#define I40E_AQ_CLUSTER_ID_DCB 8
#define I40E_AQ_CLUSTER_ID_EMP_MEM 9
#define I40E_AQ_CLUSTER_ID_PKT_BUF 10
#define I40E_AQ_CLUSTER_ID_ALTRAM 11
struct i40e_aqc_debug_dump_internals {
u8 cluster_id;
u8 table_id;
__le16 data_size;
__le32 idx;
__le32 address_high;
__le32 address_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals);
struct i40e_aqc_debug_modify_internals {
u8 cluster_id;
u8 cluster_specific_params[7];
__le32 address_high;
__le32 address_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals);
#endif
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
* Copyright(c) 2013 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
******************************************************************************/
#ifndef _I40E_ALLOC_H_
#define _I40E_ALLOC_H_
struct i40e_hw;
/* Memory allocation types */
enum i40e_memory_type {
i40e_mem_arq_buf = 0, /* ARQ indirect command buffer */
i40e_mem_asq_buf = 1,
i40e_mem_atq_buf = 2, /* ATQ indirect command buffer */
i40e_mem_arq_ring = 3, /* ARQ descriptor ring */
i40e_mem_atq_ring = 4, /* ATQ descriptor ring */
i40e_mem_pd = 5, /* Page Descriptor */
i40e_mem_bp = 6, /* Backing Page - 4KB */
i40e_mem_bp_jumbo = 7, /* Backing Page - > 4KB */
i40e_mem_reserved
};
/* prototype for functions used for dynamic memory allocation */
i40e_status i40e_allocate_dma_mem(struct i40e_hw *hw,
struct i40e_dma_mem *mem,
enum i40e_memory_type type,
u64 size, u32 alignment);
i40e_status i40e_free_dma_mem(struct i40e_hw *hw,
struct i40e_dma_mem *mem);
i40e_status i40e_allocate_virt_mem(struct i40e_hw *hw,
struct i40e_virt_mem *mem,
u32 size);
i40e_status i40e_free_virt_mem(struct i40e_hw *hw,
struct i40e_virt_mem *mem);
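/* Illustrative usage, not part of this patch: allocate a 4 KB, 4 KB-aligned
 * DMA buffer for an ARQ indirect command and free it again.  A real caller
 * would hand the buffer's virtual/physical addresses to the admin queue in
 * between.
 */
static inline i40e_status i40e_example_dma_roundtrip(struct i40e_hw *hw,
						     struct i40e_dma_mem *mem)
{
	i40e_status ret;

	ret = i40e_allocate_dma_mem(hw, mem, i40e_mem_arq_buf, 4096, 4096);
	if (ret)
		return ret;
	return i40e_free_dma_mem(hw, mem);
}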
#endif /* _I40E_ALLOC_H_ */
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
* Copyright(c) 2013 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
******************************************************************************/
#include "i40e_type.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"
#include "i40e_virtchnl.h"
/**
* i40e_set_mac_type - Sets MAC type
* @hw: pointer to the HW structure
*
* This function sets the mac type of the adapter based on the
* vendor ID and device ID stored in the hw structure.
**/
i40e_status i40e_set_mac_type(struct i40e_hw *hw)
{
i40e_status status = 0;
if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
switch (hw->device_id) {
case I40E_SFP_XL710_DEVICE_ID:
case I40E_SFP_X710_DEVICE_ID:
case I40E_QEMU_DEVICE_ID:
case I40E_KX_A_DEVICE_ID:
case I40E_KX_B_DEVICE_ID:
case I40E_KX_C_DEVICE_ID:
case I40E_KX_D_DEVICE_ID:
case I40E_QSFP_A_DEVICE_ID:
case I40E_QSFP_B_DEVICE_ID:
case I40E_QSFP_C_DEVICE_ID:
hw->mac.type = I40E_MAC_XL710;
break;
case I40E_VF_DEVICE_ID:
case I40E_VF_HV_DEVICE_ID:
hw->mac.type = I40E_MAC_VF;
break;
default:
hw->mac.type = I40E_MAC_GENERIC;
break;
}
} else {
status = I40E_ERR_DEVICE_NOT_SUPPORTED;
}
hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n",
hw->mac.type, status);
return status;
}
/**
* i40evf_debug_aq
* @hw: pointer to the hw struct
* @mask: debug mask
* @desc: pointer to admin queue descriptor
* @buffer: pointer to command buffer
*
* Dumps debug log about adminq command with descriptor contents.
**/
void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
void *buffer)
{
struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
u8 *aq_buffer = (u8 *)buffer;
u32 data[4];
u32 i = 0;
if ((!(mask & hw->debug_mask)) || (desc == NULL))
return;
i40e_debug(hw, mask,
"AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
aq_desc->opcode, aq_desc->flags, aq_desc->datalen,
aq_desc->retval);
i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
aq_desc->cookie_high, aq_desc->cookie_low);
i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
aq_desc->params.internal.param0,
aq_desc->params.internal.param1);
i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
aq_desc->params.external.addr_high,
aq_desc->params.external.addr_low);
if ((buffer != NULL) && (aq_desc->datalen != 0)) {
memset(data, 0, sizeof(data));
i40e_debug(hw, mask, "AQ CMD Buffer:\n");
for (i = 0; i < le16_to_cpu(aq_desc->datalen); i++) {
data[((i % 16) / 4)] |=
((u32)aq_buffer[i]) << (8 * (i % 4));
if ((i % 16) == 15) {
i40e_debug(hw, mask,
"\t0x%04X %08X %08X %08X %08X\n",
i - 15, data[0], data[1], data[2],
data[3]);
memset(data, 0, sizeof(data));
}
}
if ((i % 16) != 0)
i40e_debug(hw, mask, "\t0x%04X %08X %08X %08X %08X\n",
i - (i % 16), data[0], data[1], data[2],
data[3]);
}
}
/**
* i40evf_check_asq_alive
* @hw: pointer to the hw struct
*
* Returns true if the admin send queue is enabled, else false.
**/
bool i40evf_check_asq_alive(struct i40e_hw *hw)
{
return !!(rd32(hw, hw->aq.asq.len) & I40E_PF_ATQLEN_ATQENABLE_MASK);
}
/**
* i40evf_aq_queue_shutdown
* @hw: pointer to the hw struct
* @unloading: is the driver unloading itself
*
* Tell the Firmware that we're shutting down the AdminQ and whether
* or not the driver is unloading as well.
**/
i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw,
bool unloading)
{
struct i40e_aq_desc desc;
struct i40e_aqc_queue_shutdown *cmd =
(struct i40e_aqc_queue_shutdown *)&desc.params.raw;
i40e_status status;
i40evf_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_queue_shutdown);
if (unloading)
cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
status = i40evf_asq_send_command(hw, &desc, NULL, 0, NULL);
return status;
}
/**
* i40e_aq_send_msg_to_pf
* @hw: pointer to the hardware structure
* @v_opcode: opcodes for VF-PF communication
* @v_retval: return error code
* @msg: pointer to the msg buffer
* @msglen: msg length
* @cmd_details: pointer to command details
*
* Send message to PF driver using admin queue. By default, this message
* is sent asynchronously, i.e. i40evf_asq_send_command() does not wait for
* completion before returning.
**/
i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
enum i40e_virtchnl_ops v_opcode,
i40e_status v_retval,
u8 *msg, u16 msglen,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
i40e_status status;
i40evf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_pf);
desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI);
desc.cookie_high = cpu_to_le32(v_opcode);
desc.cookie_low = cpu_to_le32(v_retval);
if (msglen) {
desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF
| I40E_AQ_FLAG_RD));
if (msglen > I40E_AQ_LARGE_BUF)
desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
desc.datalen = cpu_to_le16(msglen);
}
if (!cmd_details) {
struct i40e_asq_cmd_details details;
memset(&details, 0, sizeof(details));
details.async = true;
cmd_details = &details;
}
status = i40evf_asq_send_command(hw, (struct i40e_aq_desc *)&desc, msg,
msglen, cmd_details);
return status;
}
/**
* i40e_vf_parse_hw_config
* @hw: pointer to the hardware structure
* @msg: pointer to the virtual channel VF resource structure
*
* Given a VF resource message from the PF, populate the hw struct
* with appropriate information.
**/
void i40e_vf_parse_hw_config(struct i40e_hw *hw,
struct i40e_virtchnl_vf_resource *msg)
{
struct i40e_virtchnl_vsi_resource *vsi_res;
int i;
vsi_res = &msg->vsi_res[0];
hw->dev_caps.num_vsis = msg->num_vsis;
hw->dev_caps.num_rx_qp = msg->num_queue_pairs;
hw->dev_caps.num_tx_qp = msg->num_queue_pairs;
hw->dev_caps.num_msix_vectors_vf = msg->max_vectors;
hw->dev_caps.dcb = msg->vf_offload_flags &
I40E_VIRTCHNL_VF_OFFLOAD_L2;
hw->dev_caps.fcoe = (msg->vf_offload_flags &
I40E_VIRTCHNL_VF_OFFLOAD_FCOE) ? 1 : 0;
for (i = 0; i < msg->num_vsis; i++) {
if (vsi_res->vsi_type == I40E_VSI_SRIOV) {
memcpy(hw->mac.perm_addr, vsi_res->default_mac_addr,
ETH_ALEN);
memcpy(hw->mac.addr, vsi_res->default_mac_addr,
ETH_ALEN);
}
vsi_res++;
}
}
/**
* i40e_vf_reset
* @hw: pointer to the hardware structure
*
* Send a VF_RESET message to the PF. Does not wait for response from PF
* as none will be forthcoming. Immediately after calling this function,
* the admin queue should be shut down and (optionally) reinitialized.
**/
i40e_status i40e_vf_reset(struct i40e_hw *hw)
{
return i40e_aq_send_msg_to_pf(hw, I40E_VIRTCHNL_OP_RESET_VF,
0, NULL, 0, NULL);
}
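/* Illustrative sequence, not part of this patch: per the note on
 * i40e_vf_reset() above, request the reset and then bounce the admin
 * queue.  Error handling is reduced to the bare minimum.
 */
static inline i40e_status i40e_example_reset_vf(struct i40e_hw *hw)
{
	i40e_status status;

	status = i40e_vf_reset(hw);
	if (status)
		return status;
	i40evf_shutdown_adminq(hw);
	return i40evf_init_adminq(hw);
}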
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
* Copyright(c) 2013 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
******************************************************************************/
#ifndef _I40E_HMC_H_
#define _I40E_HMC_H_
#define I40E_HMC_MAX_BP_COUNT 512
/* forward-declare the HW struct for the compiler */
struct i40e_hw;
#define I40E_HMC_INFO_SIGNATURE 0x484D5347 /* HMSG */
#define I40E_HMC_PD_CNT_IN_SD 512
#define I40E_HMC_DIRECT_BP_SIZE 0x200000 /* 2M */
#define I40E_HMC_PAGED_BP_SIZE 4096
#define I40E_HMC_PD_BP_BUF_ALIGNMENT 4096
#define I40E_FIRST_VF_FPM_ID 16
struct i40e_hmc_obj_info {
u64 base; /* base addr in FPM */
u32 max_cnt; /* max count available for this hmc func */
u32 cnt; /* count of objects driver actually wants to create */
u64 size; /* size in bytes of one object */
};
enum i40e_sd_entry_type {
I40E_SD_TYPE_INVALID = 0,
I40E_SD_TYPE_PAGED = 1,
I40E_SD_TYPE_DIRECT = 2
};
struct i40e_hmc_bp {
enum i40e_sd_entry_type entry_type;
struct i40e_dma_mem addr; /* populate to be used by hw */
u32 sd_pd_index;
u32 ref_cnt;
};
struct i40e_hmc_pd_entry {
struct i40e_hmc_bp bp;
u32 sd_index;
bool valid;
};
struct i40e_hmc_pd_table {
struct i40e_dma_mem pd_page_addr; /* populate to be used by hw */
struct i40e_hmc_pd_entry *pd_entry; /* [512] for sw book keeping */
struct i40e_virt_mem pd_entry_virt_mem; /* virt mem for pd_entry */
u32 ref_cnt;
u32 sd_index;
};
struct i40e_hmc_sd_entry {
enum i40e_sd_entry_type entry_type;
bool valid;
union {
struct i40e_hmc_pd_table pd_table;
struct i40e_hmc_bp bp;
} u;
};
struct i40e_hmc_sd_table {
struct i40e_virt_mem addr; /* used to track sd_entry allocations */
u32 sd_cnt;
u32 ref_cnt;
struct i40e_hmc_sd_entry *sd_entry; /* (sd_cnt*512) entries max */
};
struct i40e_hmc_info {
u32 signature;
/* equals the PCI function number for the PF; dynamically allocated for VFs */
u8 hmc_fn_id;
u16 first_sd_index; /* index of the first available SD */
/* hmc objects */
struct i40e_hmc_obj_info *hmc_obj;
struct i40e_virt_mem hmc_obj_virt_mem;
struct i40e_hmc_sd_table sd_table;
};
#define I40E_INC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt++)
#define I40E_INC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt++)
#define I40E_INC_BP_REFCNT(bp) ((bp)->ref_cnt++)
#define I40E_DEC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt--)
#define I40E_DEC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt--)
#define I40E_DEC_BP_REFCNT(bp) ((bp)->ref_cnt--)
/**
* I40E_SET_PF_SD_ENTRY - marks the sd entry as valid in the hardware
* @hw: pointer to our hw struct
* @pa: pointer to physical address
* @sd_index: segment descriptor index
* @type: if sd entry is direct or paged
**/
#define I40E_SET_PF_SD_ENTRY(hw, pa, sd_index, type) \
{ \
u32 val1, val2, val3; \
val1 = (u32)(upper_32_bits(pa)); \
val2 = (u32)(pa) | (I40E_HMC_MAX_BP_COUNT << \
I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \
(1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \
val3 = (sd_index) | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \
wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
wr32((hw), I40E_PFHMC_SDCMD, val3); \
}
/**
* I40E_CLEAR_PF_SD_ENTRY - marks the sd entry as invalid in the hardware
* @hw: pointer to our hw struct
* @sd_index: segment descriptor index
* @type: if sd entry is direct or paged
**/
#define I40E_CLEAR_PF_SD_ENTRY(hw, sd_index, type) \
{ \
u32 val2, val3; \
val2 = (I40E_HMC_MAX_BP_COUNT << \
I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \
val3 = (sd_index) | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \
wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
wr32((hw), I40E_PFHMC_SDCMD, val3); \
}
/**
* I40E_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware
* @hw: pointer to our hw struct
* @sd_idx: segment descriptor index
* @pd_idx: page descriptor index
**/
#define I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \
wr32((hw), I40E_PFHMC_PDINV, \
(((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
#define I40E_INVALIDATE_VF_HMC_PD(hw, sd_idx, pd_idx, hmc_fn_id) \
wr32((hw), I40E_GLHMC_VFPDINV((hmc_fn_id) - I40E_FIRST_VF_FPM_ID), \
(((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
/**
* I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit
* @hmc_info: pointer to the HMC configuration information structure
* @type: type of HMC resources we're searching
* @index: starting index for the object
* @cnt: number of objects we're trying to create
* @sd_idx: pointer to return index of the segment descriptor in question
* @sd_limit: pointer to return the maximum number of segment descriptors
*
* This function calculates the segment descriptor index and index limit
* for the resource defined by i40e_hmc_rsrc_type.
**/
#define I40E_FIND_SD_INDEX_LIMIT(hmc_info, type, index, cnt, sd_idx, sd_limit)\
{ \
u64 fpm_addr, fpm_limit; \
fpm_addr = (hmc_info)->hmc_obj[(type)].base + \
(hmc_info)->hmc_obj[(type)].size * (index); \
fpm_limit = fpm_addr + (hmc_info)->hmc_obj[(type)].size * (cnt);\
*(sd_idx) = (u32)(fpm_addr / I40E_HMC_DIRECT_BP_SIZE); \
*(sd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_DIRECT_BP_SIZE); \
/* add one more to the limit to correct our range */ \
*(sd_limit) += 1; \
}
/**
* I40E_FIND_PD_INDEX_LIMIT - finds page descriptor index limit
* @hmc_info: pointer to the HMC configuration information struct
* @type: HMC resource type we're examining
* @idx: starting index for the object
* @cnt: number of objects we're trying to create
* @pd_index: pointer to return page descriptor index
* @pd_limit: pointer to return page descriptor index limit
*
* Calculates the page descriptor index and index limit for the resource
* defined by i40e_hmc_rsrc_type.
**/
#define I40E_FIND_PD_INDEX_LIMIT(hmc_info, type, idx, cnt, pd_index, pd_limit)\
{ \
u64 fpm_adr, fpm_limit; \
fpm_adr = (hmc_info)->hmc_obj[(type)].base + \
(hmc_info)->hmc_obj[(type)].size * (idx); \
fpm_limit = fpm_adr + (hmc_info)->hmc_obj[(type)].size * (cnt); \
*(pd_index) = (u32)(fpm_adr / I40E_HMC_PAGED_BP_SIZE); \
*(pd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_PAGED_BP_SIZE); \
/* add one more to the limit to correct our range */ \
*(pd_limit) += 1; \
}
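/* Worked example, illustrative only: for 1024 Tx queue contexts of
 * I40E_HMC_OBJ_SIZE_TXQ (128) bytes starting at FPM base 0, fpm_limit is
 * 1024 * 128 = 131072 bytes.  That range sits inside the first 2 MB
 * direct backing page, so sd_idx = 0 and sd_limit = 1, while it needs
 * 131072 / 4096 = 32 page descriptors, so pd_index = 0 and pd_limit = 32.
 */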
i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
u32 sd_index,
enum i40e_sd_entry_type type,
u64 direct_mode_sz);
i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
u32 pd_index);
i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
u32 idx, bool is_pf);
i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
u32 idx);
i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
u32 idx, bool is_pf);
i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
u32 idx);
i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
u32 idx, bool is_pf);
#endif /* _I40E_HMC_H_ */
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
* Copyright(c) 2013 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
******************************************************************************/
#ifndef _I40E_LAN_HMC_H_
#define _I40E_LAN_HMC_H_
/* forward-declare the HW struct for the compiler */
struct i40e_hw;
/* HMC element context information */
/* Rx queue context data */
struct i40e_hmc_obj_rxq {
u16 head;
u8 cpuid;
u64 base;
u16 qlen;
#define I40E_RXQ_CTX_DBUFF_SHIFT 7
u8 dbuff;
#define I40E_RXQ_CTX_HBUFF_SHIFT 6
u8 hbuff;
u8 dtype;
u8 dsize;
u8 crcstrip;
u8 fc_ena;
u8 l2tsel;
u8 hsplit_0;
u8 hsplit_1;
u8 showiv;
u16 rxmax;
u8 tphrdesc_ena;
u8 tphwdesc_ena;
u8 tphdata_ena;
u8 tphhead_ena;
u8 lrxqthresh;
};
/* Tx queue context data */
struct i40e_hmc_obj_txq {
u16 head;
u8 new_context;
u64 base;
u8 fc_ena;
u8 timesync_ena;
u8 fd_ena;
u8 alt_vlan_ena;
u16 thead_wb;
u16 cpuid;
u8 head_wb_ena;
u16 qlen;
u8 tphrdesc_ena;
u8 tphrpacket_ena;
u8 tphwdesc_ena;
u64 head_wb_addr;
u32 crc;
u16 rdylist;
u8 rdylist_act;
};
/* for hsplit_0 field of Rx HMC context */
enum i40e_hmc_obj_rx_hsplit_0 {
I40E_HMC_OBJ_RX_HSPLIT_0_NO_SPLIT = 0,
I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 = 1,
I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP = 2,
I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP = 4,
I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP = 8,
};
/* fcoe_cntx and fcoe_filt are for debugging purposes only */
struct i40e_hmc_obj_fcoe_cntx {
u32 rsv[32];
};
struct i40e_hmc_obj_fcoe_filt {
u32 rsv[8];
};
/* Context sizes for LAN objects */
enum i40e_hmc_lan_object_size {
I40E_HMC_LAN_OBJ_SZ_8 = 0x3,
I40E_HMC_LAN_OBJ_SZ_16 = 0x4,
I40E_HMC_LAN_OBJ_SZ_32 = 0x5,
I40E_HMC_LAN_OBJ_SZ_64 = 0x6,
I40E_HMC_LAN_OBJ_SZ_128 = 0x7,
I40E_HMC_LAN_OBJ_SZ_256 = 0x8,
I40E_HMC_LAN_OBJ_SZ_512 = 0x9,
};
#define I40E_HMC_L2OBJ_BASE_ALIGNMENT 512
#define I40E_HMC_OBJ_SIZE_TXQ 128
#define I40E_HMC_OBJ_SIZE_RXQ 32
#define I40E_HMC_OBJ_SIZE_FCOE_CNTX 64
#define I40E_HMC_OBJ_SIZE_FCOE_FILT 64
enum i40e_hmc_lan_rsrc_type {
I40E_HMC_LAN_FULL = 0,
I40E_HMC_LAN_TX = 1,
I40E_HMC_LAN_RX = 2,
I40E_HMC_FCOE_CTX = 3,
I40E_HMC_FCOE_FILT = 4,
I40E_HMC_LAN_MAX = 5
};
enum i40e_hmc_model {
I40E_HMC_MODEL_DIRECT_PREFERRED = 0,
I40E_HMC_MODEL_DIRECT_ONLY = 1,
I40E_HMC_MODEL_PAGED_ONLY = 2,
I40E_HMC_MODEL_UNKNOWN,
};
struct i40e_hmc_lan_create_obj_info {
struct i40e_hmc_info *hmc_info;
u32 rsrc_type;
u32 start_idx;
u32 count;
enum i40e_sd_entry_type entry_type;
u64 direct_mode_sz;
};
struct i40e_hmc_lan_delete_obj_info {
struct i40e_hmc_info *hmc_info;
u32 rsrc_type;
u32 start_idx;
u32 count;
};
i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
u32 rxq_num, u32 fcoe_cntx_num,
u32 fcoe_filt_num);
i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw,
enum i40e_hmc_model model);
i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw);
i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
u16 queue);
i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
u16 queue,
struct i40e_hmc_obj_txq *s);
i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
u16 queue);
i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
u16 queue,
struct i40e_hmc_obj_rxq *s);
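/* Illustrative bring-up order, not part of this patch: size the LAN HMC,
 * then back it with memory before any queue contexts are programmed.
 * The queue counts are made-up placeholders.
 */
static inline i40e_status i40e_example_lan_hmc_bringup(struct i40e_hw *hw)
{
	i40e_status ret;

	ret = i40e_init_lan_hmc(hw, 64, 64, 0, 0);
	if (ret)
		return ret;
	return i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_PREFERRED);
}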
#endif /* _I40E_LAN_HMC_H_ */
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
* Copyright(c) 2013 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
******************************************************************************/
#ifndef _I40E_PROTOTYPE_H_
#define _I40E_PROTOTYPE_H_
#include "i40e_type.h"
#include "i40e_alloc.h"
#include "i40e_virtchnl.h"
/* Prototypes for shared code functions that are not in
* the standard function pointer structures. These are
* mostly because they are needed even before the init
* has happened and will assist in the early SW and FW
* setup.
*/
/* adminq functions */
i40e_status i40evf_init_adminq(struct i40e_hw *hw);
i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw);
void i40e_adminq_init_ring_data(struct i40e_hw *hw);
i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
struct i40e_arq_event_info *e,
u16 *events_pending);
i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
struct i40e_aq_desc *desc,
void *buff, /* can be NULL */
u16 buff_size,
struct i40e_asq_cmd_details *cmd_details);
bool i40evf_asq_done(struct i40e_hw *hw);
/* debug function for adminq */
void i40evf_debug_aq(struct i40e_hw *hw,
enum i40e_debug_mask mask,
void *desc,
void *buffer);
void i40e_idle_aq(struct i40e_hw *hw);
void i40evf_resume_aq(struct i40e_hw *hw);
bool i40evf_check_asq_alive(struct i40e_hw *hw);
i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw,
bool unloading);
i40e_status i40e_set_mac_type(struct i40e_hw *hw);
/* prototype for functions used for SW locks */
/* i40e_common for VF drivers */
void i40e_vf_parse_hw_config(struct i40e_hw *hw,
struct i40e_virtchnl_vf_resource *msg);
i40e_status i40e_vf_reset(struct i40e_hw *hw);
i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
enum i40e_virtchnl_ops v_opcode,
i40e_status v_retval,
u8 *msg, u16 msglen,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_set_filter_control(struct i40e_hw *hw,
struct i40e_filter_control_settings *settings);
i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
u8 *mac_addr, u16 ethtype, u16 flags,
u16 vsi_seid, u16 queue, bool is_add,
struct i40e_control_filter_stats *stats,
struct i40e_asq_cmd_details *cmd_details);
#endif /* _I40E_PROTOTYPE_H_ */
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
* Copyright(c) 2013 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
******************************************************************************/
#ifndef _I40E_STATUS_H_
#define _I40E_STATUS_H_
/* Error Codes */
enum i40e_status_code {
I40E_SUCCESS = 0,
I40E_ERR_NVM = -1,
I40E_ERR_NVM_CHECKSUM = -2,
I40E_ERR_PHY = -3,
I40E_ERR_CONFIG = -4,
I40E_ERR_PARAM = -5,
I40E_ERR_MAC_TYPE = -6,
I40E_ERR_UNKNOWN_PHY = -7,
I40E_ERR_LINK_SETUP = -8,
I40E_ERR_ADAPTER_STOPPED = -9,
I40E_ERR_INVALID_MAC_ADDR = -10,
I40E_ERR_DEVICE_NOT_SUPPORTED = -11,
I40E_ERR_MASTER_REQUESTS_PENDING = -12,
I40E_ERR_INVALID_LINK_SETTINGS = -13,
I40E_ERR_AUTONEG_NOT_COMPLETE = -14,
I40E_ERR_RESET_FAILED = -15,
I40E_ERR_SWFW_SYNC = -16,
I40E_ERR_NO_AVAILABLE_VSI = -17,
I40E_ERR_NO_MEMORY = -18,
I40E_ERR_BAD_PTR = -19,
I40E_ERR_RING_FULL = -20,
I40E_ERR_INVALID_PD_ID = -21,
I40E_ERR_INVALID_QP_ID = -22,
I40E_ERR_INVALID_CQ_ID = -23,
I40E_ERR_INVALID_CEQ_ID = -24,
I40E_ERR_INVALID_AEQ_ID = -25,
I40E_ERR_INVALID_SIZE = -26,
I40E_ERR_INVALID_ARP_INDEX = -27,
I40E_ERR_INVALID_FPM_FUNC_ID = -28,
I40E_ERR_QP_INVALID_MSG_SIZE = -29,
I40E_ERR_QP_TOOMANY_WRS_POSTED = -30,
I40E_ERR_INVALID_FRAG_COUNT = -31,
I40E_ERR_QUEUE_EMPTY = -32,
I40E_ERR_INVALID_ALIGNMENT = -33,
I40E_ERR_FLUSHED_QUEUE = -34,
I40E_ERR_INVALID_PUSH_PAGE_INDEX = -35,
I40E_ERR_INVALID_IMM_DATA_SIZE = -36,
I40E_ERR_TIMEOUT = -37,
I40E_ERR_OPCODE_MISMATCH = -38,
I40E_ERR_CQP_COMPL_ERROR = -39,
I40E_ERR_INVALID_VF_ID = -40,
I40E_ERR_INVALID_HMCFN_ID = -41,
I40E_ERR_BACKING_PAGE_ERROR = -42,
I40E_ERR_NO_PBLCHUNKS_AVAILABLE = -43,
I40E_ERR_INVALID_PBLE_INDEX = -44,
I40E_ERR_INVALID_SD_INDEX = -45,
I40E_ERR_INVALID_PAGE_DESC_INDEX = -46,
I40E_ERR_INVALID_SD_TYPE = -47,
I40E_ERR_MEMCPY_FAILED = -48,
I40E_ERR_INVALID_HMC_OBJ_INDEX = -49,
I40E_ERR_INVALID_HMC_OBJ_COUNT = -50,
I40E_ERR_INVALID_SRQ_ARM_LIMIT = -51,
I40E_ERR_SRQ_ENABLED = -52,
I40E_ERR_ADMIN_QUEUE_ERROR = -53,
I40E_ERR_ADMIN_QUEUE_TIMEOUT = -54,
I40E_ERR_BUF_TOO_SHORT = -55,
I40E_ERR_ADMIN_QUEUE_FULL = -56,
I40E_ERR_ADMIN_QUEUE_NO_WORK = -57,
I40E_ERR_BAD_IWARP_CQE = -58,
I40E_ERR_NVM_BLANK_MODE = -59,
I40E_ERR_NOT_IMPLEMENTED = -60,
I40E_ERR_PE_DOORBELL_NOT_ENABLED = -61,
I40E_ERR_DIAG_TEST_FAILED = -62,
I40E_ERR_NOT_READY = -63,
I40E_NOT_SUPPORTED = -64,
I40E_ERR_FIRMWARE_API_VERSION = -65,
};
#endif /* _I40E_STATUS_H_ */
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
* Copyright(c) 2013 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
******************************************************************************/
#ifndef _I40E_TYPE_H_
#define _I40E_TYPE_H_
#include "i40e_status.h"
#include "i40e_osdep.h"
#include "i40e_register.h"
#include "i40e_adminq.h"
#include "i40e_hmc.h"
#include "i40e_lan_hmc.h"
/* Device IDs */
#define I40E_SFP_XL710_DEVICE_ID 0x1572
#define I40E_SFP_X710_DEVICE_ID 0x1573
#define I40E_QEMU_DEVICE_ID 0x1574
#define I40E_KX_A_DEVICE_ID 0x157F
#define I40E_KX_B_DEVICE_ID 0x1580
#define I40E_KX_C_DEVICE_ID 0x1581
#define I40E_KX_D_DEVICE_ID 0x1582
#define I40E_QSFP_A_DEVICE_ID 0x1583
#define I40E_QSFP_B_DEVICE_ID 0x1584
#define I40E_QSFP_C_DEVICE_ID 0x1585
#define I40E_VF_DEVICE_ID 0x154C
#define I40E_VF_HV_DEVICE_ID 0x1571
#define i40e_is_40G_device(d) ((d) == I40E_QSFP_A_DEVICE_ID || \
(d) == I40E_QSFP_B_DEVICE_ID || \
(d) == I40E_QSFP_C_DEVICE_ID)
#define I40E_MAX_VSI_QP 16
#define I40E_MAX_VF_VSI 3
#define I40E_MAX_CHAINED_RX_BUFFERS 5
#define I40E_MAX_PF_UDP_OFFLOAD_PORTS 16
/* Max default timeout in ms */
#define I40E_MAX_NVM_TIMEOUT 18000
/* Switch from ms to the 2usec global time (this is the GTIME resolution) */
#define I40E_MS_TO_GTIME(time) (((time) * 1000) / 2)
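/* Worked example (illustrative only): the maximum NVM timeout above maps to
 * I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) = (18000 * 1000) / 2 = 9,000,000
 * ticks of the 2 usec global timer.
 */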
/* forward declaration */
struct i40e_hw;
typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
#define ETH_ALEN 6
/* Data type manipulation macros. */
#define I40E_DESC_UNUSED(R) \
((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
(R)->next_to_clean - (R)->next_to_use - 1)
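/* Illustrative usage sketch (not part of this patch): how a caller might use
 * I40E_DESC_UNUSED() to decide whether a descriptor ring has room.  The
 * struct and helper names below are hypothetical and exist only for this
 * example; they assume the ring keeps count/next_to_use/next_to_clean as the
 * macro expects.
 */
struct example_ring {
	u16 count;		/* total descriptors in the ring */
	u16 next_to_use;	/* next slot the driver will fill */
	u16 next_to_clean;	/* next slot the driver will reclaim */
};

static inline bool example_ring_has_room(const struct example_ring *r,
					 u16 needed)
{
	/* one slot is always kept empty so that head == tail means "empty" */
	return I40E_DESC_UNUSED(r) >= needed;
}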
/* bitfields for Tx queue mapping in QTX_CTL */
#define I40E_QTX_CTL_VF_QUEUE 0x0
#define I40E_QTX_CTL_VM_QUEUE 0x1
#define I40E_QTX_CTL_PF_QUEUE 0x2
/* debug masks - set these bits in hw->debug_mask to control output */
enum i40e_debug_mask {
I40E_DEBUG_INIT = 0x00000001,
I40E_DEBUG_RELEASE = 0x00000002,
I40E_DEBUG_LINK = 0x00000010,
I40E_DEBUG_PHY = 0x00000020,
I40E_DEBUG_HMC = 0x00000040,
I40E_DEBUG_NVM = 0x00000080,
I40E_DEBUG_LAN = 0x00000100,
I40E_DEBUG_FLOW = 0x00000200,
I40E_DEBUG_DCB = 0x00000400,
I40E_DEBUG_DIAG = 0x00000800,
I40E_DEBUG_AQ_MESSAGE = 0x01000000,
I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000,
I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000,
I40E_DEBUG_AQ_COMMAND = 0x06000000,
I40E_DEBUG_AQ = 0x0F000000,
I40E_DEBUG_USER = 0xF0000000,
I40E_DEBUG_ALL = 0xFFFFFFFF
};
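/* Example (illustrative only): a driver could enable init messages plus all
 * AdminQ tracing with, e.g.:
 *	hw->debug_mask = I40E_DEBUG_INIT | I40E_DEBUG_AQ;
 */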
/* PCI Bus Info */
#define I40E_PCI_LINK_WIDTH_1 0x10
#define I40E_PCI_LINK_WIDTH_2 0x20
#define I40E_PCI_LINK_WIDTH_4 0x40
#define I40E_PCI_LINK_WIDTH_8 0x80
#define I40E_PCI_LINK_SPEED_2500 0x1
#define I40E_PCI_LINK_SPEED_5000 0x2
#define I40E_PCI_LINK_SPEED_8000 0x3
/* These are structs for managing the hardware information and the operations.
* The structures of function pointers are filled out at init time when we
* know for sure exactly which hardware we're working with. This gives us the
* flexibility of using the same main driver code but adapting to slightly
* different hardware needs as new parts are developed. For this architecture,
* the Firmware and AdminQ are intended to insulate the driver from most of the
* future changes, but these structures will also do part of the job.
*/
enum i40e_mac_type {
I40E_MAC_UNKNOWN = 0,
I40E_MAC_X710,
I40E_MAC_XL710,
I40E_MAC_VF,
I40E_MAC_GENERIC,
};
enum i40e_media_type {
I40E_MEDIA_TYPE_UNKNOWN = 0,
I40E_MEDIA_TYPE_FIBER,
I40E_MEDIA_TYPE_BASET,
I40E_MEDIA_TYPE_BACKPLANE,
I40E_MEDIA_TYPE_CX4,
I40E_MEDIA_TYPE_DA,
I40E_MEDIA_TYPE_VIRTUAL
};
enum i40e_fc_mode {
I40E_FC_NONE = 0,
I40E_FC_RX_PAUSE,
I40E_FC_TX_PAUSE,
I40E_FC_FULL,
I40E_FC_PFC,
I40E_FC_DEFAULT
};
enum i40e_vsi_type {
I40E_VSI_MAIN = 0,
I40E_VSI_VMDQ1,
I40E_VSI_VMDQ2,
I40E_VSI_CTRL,
I40E_VSI_FCOE,
I40E_VSI_MIRROR,
I40E_VSI_SRIOV,
I40E_VSI_FDIR,
I40E_VSI_TYPE_UNKNOWN
};
enum i40e_queue_type {
I40E_QUEUE_TYPE_RX = 0,
I40E_QUEUE_TYPE_TX,
I40E_QUEUE_TYPE_PE_CEQ,
I40E_QUEUE_TYPE_UNKNOWN
};
struct i40e_link_status {
enum i40e_aq_phy_type phy_type;
enum i40e_aq_link_speed link_speed;
u8 link_info;
u8 an_info;
u8 ext_info;
u8 loopback;
/* is Link Status Event notification to SW enabled */
bool lse_enable;
};
struct i40e_phy_info {
struct i40e_link_status link_info;
struct i40e_link_status link_info_old;
u32 autoneg_advertised;
u32 phy_id;
u32 module_type;
bool get_link_info;
enum i40e_media_type media_type;
};
#define I40E_HW_CAP_MAX_GPIO 30
/* Capabilities of a PF or a VF or the whole device */
struct i40e_hw_capabilities {
u32 switch_mode;
#define I40E_NVM_IMAGE_TYPE_EVB 0x0
#define I40E_NVM_IMAGE_TYPE_CLOUD 0x2
#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3
u32 management_mode;
u32 npar_enable;
u32 os2bmc;
u32 valid_functions;
bool sr_iov_1_1;
bool vmdq;
bool evb_802_1_qbg; /* Edge Virtual Bridging */
bool evb_802_1_qbh; /* Bridge Port Extension */
bool dcb;
bool fcoe;
bool mfp_mode_1;
bool mgmt_cem;
bool ieee_1588;
bool iwarp;
bool fd;
u32 fd_filters_guaranteed;
u32 fd_filters_best_effort;
bool rss;
u32 rss_table_size;
u32 rss_table_entry_width;
bool led[I40E_HW_CAP_MAX_GPIO];
bool sdp[I40E_HW_CAP_MAX_GPIO];
u32 nvm_image_type;
u32 num_flow_director_filters;
u32 num_vfs;
u32 vf_base_id;
u32 num_vsis;
u32 num_rx_qp;
u32 num_tx_qp;
u32 base_queue;
u32 num_msix_vectors;
u32 num_msix_vectors_vf;
u32 led_pin_num;
u32 sdp_pin_num;
u32 mdio_port_num;
u32 mdio_port_mode;
u8 rx_buf_chain_len;
u32 enabled_tcmap;
u32 maxtc;
};
struct i40e_mac_info {
enum i40e_mac_type type;
u8 addr[ETH_ALEN];
u8 perm_addr[ETH_ALEN];
u8 san_addr[ETH_ALEN];
u16 max_fcoeq;
};
enum i40e_aq_resources_ids {
I40E_NVM_RESOURCE_ID = 1
};
enum i40e_aq_resource_access_type {
I40E_RESOURCE_READ = 1,
I40E_RESOURCE_WRITE
};
struct i40e_nvm_info {
u64 hw_semaphore_timeout; /* 2usec global time (GTIME resolution) */
u64 hw_semaphore_wait; /* - || - */
u32 timeout; /* [ms] */
u16 sr_size; /* Shadow RAM size in words */
bool blank_nvm_mode; /* is NVM empty (no FW present) */
u16 version; /* NVM package version */
u32 eetrack; /* NVM data version */
};
/* PCI bus types */
enum i40e_bus_type {
i40e_bus_type_unknown = 0,
i40e_bus_type_pci,
i40e_bus_type_pcix,
i40e_bus_type_pci_express,
i40e_bus_type_reserved
};
/* PCI bus speeds */
enum i40e_bus_speed {
i40e_bus_speed_unknown = 0,
i40e_bus_speed_33 = 33,
i40e_bus_speed_66 = 66,
i40e_bus_speed_100 = 100,
i40e_bus_speed_120 = 120,
i40e_bus_speed_133 = 133,
i40e_bus_speed_2500 = 2500,
i40e_bus_speed_5000 = 5000,
i40e_bus_speed_8000 = 8000,
i40e_bus_speed_reserved
};
/* PCI bus widths */
enum i40e_bus_width {
i40e_bus_width_unknown = 0,
i40e_bus_width_pcie_x1 = 1,
i40e_bus_width_pcie_x2 = 2,
i40e_bus_width_pcie_x4 = 4,
i40e_bus_width_pcie_x8 = 8,
i40e_bus_width_32 = 32,
i40e_bus_width_64 = 64,
i40e_bus_width_reserved
};
/* Bus parameters */
struct i40e_bus_info {
enum i40e_bus_speed speed;
enum i40e_bus_width width;
enum i40e_bus_type type;
u16 func;
u16 device;
u16 lan_id;
};
/* Flow control (FC) parameters */
struct i40e_fc_info {
enum i40e_fc_mode current_mode; /* FC mode in effect */
enum i40e_fc_mode requested_mode; /* FC mode requested by caller */
};
#define I40E_MAX_TRAFFIC_CLASS 8
#define I40E_MAX_USER_PRIORITY 8
#define I40E_DCBX_MAX_APPS 32
#define I40E_LLDPDU_SIZE 1500
/* IEEE 802.1Qaz ETS Configuration data */
struct i40e_ieee_ets_config {
u8 willing;
u8 cbs;
u8 maxtcs;
u8 prioritytable[I40E_MAX_TRAFFIC_CLASS];
u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS];
u8 tsatable[I40E_MAX_TRAFFIC_CLASS];
};
/* IEEE 802.1Qaz ETS Recommendation data */
struct i40e_ieee_ets_recommend {
u8 prioritytable[I40E_MAX_TRAFFIC_CLASS];
u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS];
u8 tsatable[I40E_MAX_TRAFFIC_CLASS];
};
/* IEEE 802.1Qaz PFC Configuration data */
struct i40e_ieee_pfc_config {
u8 willing;
u8 mbc;
u8 pfccap;
u8 pfcenable;
};
/* IEEE 802.1Qaz Application Priority data */
struct i40e_ieee_app_priority_table {
u8 priority;
u8 selector;
u16 protocolid;
};
struct i40e_dcbx_config {
u32 numapps;
struct i40e_ieee_ets_config etscfg;
struct i40e_ieee_ets_recommend etsrec;
struct i40e_ieee_pfc_config pfc;
struct i40e_ieee_app_priority_table app[I40E_DCBX_MAX_APPS];
};
/* Port hardware description */
struct i40e_hw {
u8 __iomem *hw_addr;
void *back;
/* function pointer structs */
struct i40e_phy_info phy;
struct i40e_mac_info mac;
struct i40e_bus_info bus;
struct i40e_nvm_info nvm;
struct i40e_fc_info fc;
/* pci info */
u16 device_id;
u16 vendor_id;
u16 subsystem_device_id;
u16 subsystem_vendor_id;
u8 revision_id;
u8 port;
bool adapter_stopped;
/* capabilities for entire device and PCI func */
struct i40e_hw_capabilities dev_caps;
struct i40e_hw_capabilities func_caps;
/* Flow Director shared filter space */
u16 fdir_shared_filter_count;
/* device profile info */
u8 pf_id;
u16 main_vsi_seid;
/* Closest numa node to the device */
u16 numa_node;
/* Admin Queue info */
struct i40e_adminq_info aq;
/* HMC info */
struct i40e_hmc_info hmc; /* HMC info struct */
/* LLDP/DCBX Status */
u16 dcbx_status;
/* DCBX info */
struct i40e_dcbx_config local_dcbx_config;
struct i40e_dcbx_config remote_dcbx_config;
/* debug mask */
u32 debug_mask;
};
struct i40e_driver_version {
u8 major_version;
u8 minor_version;
u8 build_version;
u8 subbuild_version;
};
/* RX Descriptors */
union i40e_16byte_rx_desc {
struct {
__le64 pkt_addr; /* Packet buffer address */
__le64 hdr_addr; /* Header buffer address */
} read;
struct {
struct {
struct {
union {
__le16 mirroring_status;
__le16 fcoe_ctx_id;
} mirr_fcoe;
__le16 l2tag1;
} lo_dword;
union {
__le32 rss; /* RSS Hash */
__le32 fd_id; /* Flow director filter id */
__le32 fcoe_param; /* FCoE DDP Context id */
} hi_dword;
} qword0;
struct {
/* ext status/error/pktype/length */
__le64 status_error_len;
} qword1;
} wb; /* writeback */
};
union i40e_32byte_rx_desc {
struct {
__le64 pkt_addr; /* Packet buffer address */
__le64 hdr_addr; /* Header buffer address */
/* bit 0 of hdr_buffer_addr is DD bit */
__le64 rsvd1;
__le64 rsvd2;
} read;
struct {
struct {
struct {
union {
__le16 mirroring_status;
__le16 fcoe_ctx_id;
} mirr_fcoe;
__le16 l2tag1;
} lo_dword;
union {
__le32 rss; /* RSS Hash */
__le32 fcoe_param; /* FCoE DDP Context id */
} hi_dword;
} qword0;
struct {
/* status/error/pktype/length */
__le64 status_error_len;
} qword1;
struct {
__le16 ext_status; /* extended status */
__le16 rsvd;
__le16 l2tag2_1;
__le16 l2tag2_2;
} qword2;
struct {
union {
__le32 flex_bytes_lo;
__le32 pe_status;
} lo_dword;
union {
__le32 flex_bytes_hi;
__le32 fd_id;
} hi_dword;
} qword3;
} wb; /* writeback */
};
#define I40E_RXD_QW1_STATUS_SHIFT 0
#define I40E_RXD_QW1_STATUS_MASK (0x7FFFUL << I40E_RXD_QW1_STATUS_SHIFT)
enum i40e_rx_desc_status_bits {
/* Note: These are predefined bit offsets */
I40E_RX_DESC_STATUS_DD_SHIFT = 0,
I40E_RX_DESC_STATUS_EOF_SHIFT = 1,
I40E_RX_DESC_STATUS_L2TAG1P_SHIFT = 2,
I40E_RX_DESC_STATUS_L3L4P_SHIFT = 3,
I40E_RX_DESC_STATUS_CRCP_SHIFT = 4,
I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */
I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7,
I40E_RX_DESC_STATUS_PIF_SHIFT = 8,
I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
I40E_RX_DESC_STATUS_FLM_SHIFT = 11,
I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */
I40E_RX_DESC_STATUS_LPBK_SHIFT = 14,
I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */
I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18
};
#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \
I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT
#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK (0x1UL << \
I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
enum i40e_rx_desc_fltstat_values {
I40E_RX_DESC_FLTSTAT_NO_DATA = 0,
I40E_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? FD_ID : RSV */
I40E_RX_DESC_FLTSTAT_RSV = 2,
I40E_RX_DESC_FLTSTAT_RSS_HASH = 3,
};
#define I40E_RXD_QW1_ERROR_SHIFT 19
#define I40E_RXD_QW1_ERROR_MASK (0xFFUL << I40E_RXD_QW1_ERROR_SHIFT)
enum i40e_rx_desc_error_bits {
/* Note: These are predefined bit offsets */
I40E_RX_DESC_ERROR_RXE_SHIFT = 0,
I40E_RX_DESC_ERROR_RECIPE_SHIFT = 1,
I40E_RX_DESC_ERROR_HBO_SHIFT = 2,
I40E_RX_DESC_ERROR_L3L4E_SHIFT = 3, /* 3 BITS */
I40E_RX_DESC_ERROR_IPE_SHIFT = 3,
I40E_RX_DESC_ERROR_L4E_SHIFT = 4,
I40E_RX_DESC_ERROR_EIPE_SHIFT = 5,
I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6
};
enum i40e_rx_desc_error_l3l4e_fcoe_masks {
I40E_RX_DESC_ERROR_L3L4E_NONE = 0,
I40E_RX_DESC_ERROR_L3L4E_PROT = 1,
I40E_RX_DESC_ERROR_L3L4E_FC = 2,
I40E_RX_DESC_ERROR_L3L4E_DMAC_ERR = 3,
I40E_RX_DESC_ERROR_L3L4E_DMAC_WARN = 4
};
#define I40E_RXD_QW1_PTYPE_SHIFT 30
#define I40E_RXD_QW1_PTYPE_MASK (0xFFULL << I40E_RXD_QW1_PTYPE_SHIFT)
/* Packet type non-ip values */
enum i40e_rx_l2_ptype {
I40E_RX_PTYPE_L2_RESERVED = 0,
I40E_RX_PTYPE_L2_MAC_PAY2 = 1,
I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
I40E_RX_PTYPE_L2_FIP_PAY2 = 3,
I40E_RX_PTYPE_L2_OUI_PAY2 = 4,
I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
I40E_RX_PTYPE_L2_LLDP_PAY2 = 6,
I40E_RX_PTYPE_L2_ECP_PAY2 = 7,
I40E_RX_PTYPE_L2_EVB_PAY2 = 8,
I40E_RX_PTYPE_L2_QCN_PAY2 = 9,
I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10,
I40E_RX_PTYPE_L2_ARP = 11,
I40E_RX_PTYPE_L2_FCOE_PAY3 = 12,
I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13,
I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14,
I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15,
I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16,
I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17,
I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18,
I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19,
I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20,
I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21,
I40E_RX_PTYPE_GRENAT4_MAC_PAY3 = 58,
I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87,
I40E_RX_PTYPE_GRENAT6_MAC_PAY3 = 124,
I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153
};
struct i40e_rx_ptype_decoded {
u32 ptype:8;
u32 known:1;
u32 outer_ip:1;
u32 outer_ip_ver:1;
u32 outer_frag:1;
u32 tunnel_type:3;
u32 tunnel_end_prot:2;
u32 tunnel_end_frag:1;
u32 inner_prot:4;
u32 payload_layer:3;
};
enum i40e_rx_ptype_outer_ip {
I40E_RX_PTYPE_OUTER_L2 = 0,
I40E_RX_PTYPE_OUTER_IP = 1
};
enum i40e_rx_ptype_outer_ip_ver {
I40E_RX_PTYPE_OUTER_NONE = 0,
I40E_RX_PTYPE_OUTER_IPV4 = 0,
I40E_RX_PTYPE_OUTER_IPV6 = 1
};
enum i40e_rx_ptype_outer_fragmented {
I40E_RX_PTYPE_NOT_FRAG = 0,
I40E_RX_PTYPE_FRAG = 1
};
enum i40e_rx_ptype_tunnel_type {
I40E_RX_PTYPE_TUNNEL_NONE = 0,
I40E_RX_PTYPE_TUNNEL_IP_IP = 1,
I40E_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
};
enum i40e_rx_ptype_tunnel_end_prot {
I40E_RX_PTYPE_TUNNEL_END_NONE = 0,
I40E_RX_PTYPE_TUNNEL_END_IPV4 = 1,
I40E_RX_PTYPE_TUNNEL_END_IPV6 = 2,
};
enum i40e_rx_ptype_inner_prot {
I40E_RX_PTYPE_INNER_PROT_NONE = 0,
I40E_RX_PTYPE_INNER_PROT_UDP = 1,
I40E_RX_PTYPE_INNER_PROT_TCP = 2,
I40E_RX_PTYPE_INNER_PROT_SCTP = 3,
I40E_RX_PTYPE_INNER_PROT_ICMP = 4,
I40E_RX_PTYPE_INNER_PROT_TIMESYNC = 5
};
enum i40e_rx_ptype_payload_layer {
I40E_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
I40E_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
};
#define I40E_RXD_QW1_LENGTH_PBUF_SHIFT 38
#define I40E_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \
I40E_RXD_QW1_LENGTH_PBUF_SHIFT)
#define I40E_RXD_QW1_LENGTH_HBUF_SHIFT 52
#define I40E_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \
I40E_RXD_QW1_LENGTH_HBUF_SHIFT)
#define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63
#define I40E_RXD_QW1_LENGTH_SPH_MASK (0x1ULL << \
I40E_RXD_QW1_LENGTH_SPH_SHIFT)
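/* Illustrative sketch (not part of this patch): decoding the Rx writeback
 * qword1 with the shift/mask pairs above.  The helper name and its out
 * parameters are assumptions made only for this example.
 */
static inline bool example_rx_desc_done(const union i40e_32byte_rx_desc *desc,
					u8 *ptype, u16 *pkt_len)
{
	u64 qword1 = le64_to_cpu(desc->wb.qword1.status_error_len);
	u32 status = (qword1 & I40E_RXD_QW1_STATUS_MASK) >>
		     I40E_RXD_QW1_STATUS_SHIFT;

	*ptype = (qword1 & I40E_RXD_QW1_PTYPE_MASK) >>
		 I40E_RXD_QW1_PTYPE_SHIFT;
	*pkt_len = (qword1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
		   I40E_RXD_QW1_LENGTH_PBUF_SHIFT;

	/* DD (descriptor done) set means hardware has written the entry back */
	return !!(status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT));
}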
enum i40e_rx_desc_ext_status_bits {
/* Note: These are predefined bit offsets */
I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT = 0,
I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1,
I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */
I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */
I40E_RX_DESC_EXT_STATUS_FTYPE_SHIFT = 6, /* 3 BITS */
I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9,
I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10,
I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11,
};
enum i40e_rx_desc_pe_status_bits {
/* Note: These are predefined bit offsets */
I40E_RX_DESC_PE_STATUS_QPID_SHIFT = 0, /* 18 BITS */
I40E_RX_DESC_PE_STATUS_L4PORT_SHIFT = 0, /* 16 BITS */
I40E_RX_DESC_PE_STATUS_IPINDEX_SHIFT = 16, /* 8 BITS */
I40E_RX_DESC_PE_STATUS_QPIDHIT_SHIFT = 24,
I40E_RX_DESC_PE_STATUS_APBVTHIT_SHIFT = 25,
I40E_RX_DESC_PE_STATUS_PORTV_SHIFT = 26,
I40E_RX_DESC_PE_STATUS_URG_SHIFT = 27,
I40E_RX_DESC_PE_STATUS_IPFRAG_SHIFT = 28,
I40E_RX_DESC_PE_STATUS_IPOPT_SHIFT = 29
};
#define I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT 38
#define I40E_RX_PROG_STATUS_DESC_LENGTH 0x2000000
#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT 2
#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK (0x7UL << \
I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT)
#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT 19
#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK (0x3FUL << \
I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT)
enum i40e_rx_prog_status_desc_status_bits {
/* Note: These are predefined bit offsets */
I40E_RX_PROG_STATUS_DESC_DD_SHIFT = 0,
I40E_RX_PROG_STATUS_DESC_PROG_ID_SHIFT = 2 /* 3 BITS */
};
enum i40e_rx_prog_status_desc_prog_id_masks {
I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS = 1,
I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS = 2,
I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS = 4,
};
enum i40e_rx_prog_status_desc_error_bits {
/* Note: These are predefined bit offsets */
I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0,
I40E_RX_PROG_STATUS_DESC_NO_FD_QUOTA_SHIFT = 1,
I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2,
I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3
};
/* TX Descriptor */
struct i40e_tx_desc {
__le64 buffer_addr; /* Address of descriptor's data buf */
__le64 cmd_type_offset_bsz;
};
#define I40E_TXD_QW1_DTYPE_SHIFT 0
#define I40E_TXD_QW1_DTYPE_MASK (0xFUL << I40E_TXD_QW1_DTYPE_SHIFT)
enum i40e_tx_desc_dtype_value {
I40E_TX_DESC_DTYPE_DATA = 0x0,
I40E_TX_DESC_DTYPE_NOP = 0x1, /* same as Context desc */
I40E_TX_DESC_DTYPE_CONTEXT = 0x1,
I40E_TX_DESC_DTYPE_FCOE_CTX = 0x2,
I40E_TX_DESC_DTYPE_FILTER_PROG = 0x8,
I40E_TX_DESC_DTYPE_DDP_CTX = 0x9,
I40E_TX_DESC_DTYPE_FLEX_DATA = 0xB,
I40E_TX_DESC_DTYPE_FLEX_CTX_1 = 0xC,
I40E_TX_DESC_DTYPE_FLEX_CTX_2 = 0xD,
I40E_TX_DESC_DTYPE_DESC_DONE = 0xF
};
#define I40E_TXD_QW1_CMD_SHIFT 4
#define I40E_TXD_QW1_CMD_MASK (0x3FFUL << I40E_TXD_QW1_CMD_SHIFT)
enum i40e_tx_desc_cmd_bits {
I40E_TX_DESC_CMD_EOP = 0x0001,
I40E_TX_DESC_CMD_RS = 0x0002,
I40E_TX_DESC_CMD_ICRC = 0x0004,
I40E_TX_DESC_CMD_IL2TAG1 = 0x0008,
I40E_TX_DESC_CMD_DUMMY = 0x0010,
I40E_TX_DESC_CMD_IIPT_NONIP = 0x0000, /* 2 BITS */
I40E_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */
I40E_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */
I40E_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */
I40E_TX_DESC_CMD_FCOET = 0x0080,
I40E_TX_DESC_CMD_L4T_EOFT_UNK = 0x0000, /* 2 BITS */
I40E_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */
I40E_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */
I40E_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */
I40E_TX_DESC_CMD_L4T_EOFT_EOF_N = 0x0000, /* 2 BITS */
I40E_TX_DESC_CMD_L4T_EOFT_EOF_T = 0x0100, /* 2 BITS */
I40E_TX_DESC_CMD_L4T_EOFT_EOF_NI = 0x0200, /* 2 BITS */
I40E_TX_DESC_CMD_L4T_EOFT_EOF_A = 0x0300, /* 2 BITS */
};
#define I40E_TXD_QW1_OFFSET_SHIFT 16
#define I40E_TXD_QW1_OFFSET_MASK (0x3FFFFULL << \
I40E_TXD_QW1_OFFSET_SHIFT)
enum i40e_tx_desc_length_fields {
/* Note: These are predefined bit offsets */
I40E_TX_DESC_LENGTH_MACLEN_SHIFT = 0, /* 7 BITS */
I40E_TX_DESC_LENGTH_IPLEN_SHIFT = 7, /* 7 BITS */
I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT = 14 /* 4 BITS */
};
#define I40E_TXD_QW1_TX_BUF_SZ_SHIFT 34
#define I40E_TXD_QW1_TX_BUF_SZ_MASK (0x3FFFULL << \
I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
#define I40E_TXD_QW1_L2TAG1_SHIFT 48
#define I40E_TXD_QW1_L2TAG1_MASK (0xFFFFULL << I40E_TXD_QW1_L2TAG1_SHIFT)
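/* Illustrative sketch (not part of this patch): packing a Tx data
 * descriptor's cmd_type_offset_bsz qword from the fields defined above.
 * The helper name and its arguments are assumptions made for this example.
 */
static inline __le64 example_build_tx_qword1(u32 td_cmd, u32 td_offset,
					     u16 size, u16 l2tag1)
{
	u64 qword = I40E_TX_DESC_DTYPE_DATA |
		    ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
		    ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
		    ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
		    ((u64)l2tag1 << I40E_TXD_QW1_L2TAG1_SHIFT);

	return cpu_to_le64(qword);
}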
/* Context descriptors */
struct i40e_tx_context_desc {
__le32 tunneling_params;
__le16 l2tag2;
__le16 rsvd;
__le64 type_cmd_tso_mss;
};
#define I40E_TXD_CTX_QW1_DTYPE_SHIFT 0
#define I40E_TXD_CTX_QW1_DTYPE_MASK (0xFUL << I40E_TXD_CTX_QW1_DTYPE_SHIFT)
#define I40E_TXD_CTX_QW1_CMD_SHIFT 4
#define I40E_TXD_CTX_QW1_CMD_MASK (0xFFFFUL << I40E_TXD_CTX_QW1_CMD_SHIFT)
enum i40e_tx_ctx_desc_cmd_bits {
I40E_TX_CTX_DESC_TSO = 0x01,
I40E_TX_CTX_DESC_TSYN = 0x02,
I40E_TX_CTX_DESC_IL2TAG2 = 0x04,
I40E_TX_CTX_DESC_IL2TAG2_IL2H = 0x08,
I40E_TX_CTX_DESC_SWTCH_NOTAG = 0x00,
I40E_TX_CTX_DESC_SWTCH_UPLINK = 0x10,
I40E_TX_CTX_DESC_SWTCH_LOCAL = 0x20,
I40E_TX_CTX_DESC_SWTCH_VSI = 0x30,
I40E_TX_CTX_DESC_SWPE = 0x40
};
#define I40E_TXD_CTX_QW1_TSO_LEN_SHIFT 30
#define I40E_TXD_CTX_QW1_TSO_LEN_MASK (0x3FFFFULL << \
I40E_TXD_CTX_QW1_TSO_LEN_SHIFT)
#define I40E_TXD_CTX_QW1_MSS_SHIFT 50
#define I40E_TXD_CTX_QW1_MSS_MASK (0x3FFFULL << \
I40E_TXD_CTX_QW1_MSS_SHIFT)
#define I40E_TXD_CTX_QW1_VSI_SHIFT 50
#define I40E_TXD_CTX_QW1_VSI_MASK (0x1FFULL << I40E_TXD_CTX_QW1_VSI_SHIFT)
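/* Illustrative sketch (not part of this patch): packing a Tx context
 * descriptor's type_cmd_tso_mss qword for a TSO request using the fields
 * above.  The helper name and its arguments are assumptions for the example.
 */
static inline __le64 example_build_tso_ctx_qword(u32 tso_len, u16 mss)
{
	u64 qword = I40E_TX_DESC_DTYPE_CONTEXT |
		    ((u64)I40E_TX_CTX_DESC_TSO << I40E_TXD_CTX_QW1_CMD_SHIFT) |
		    ((u64)tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
		    ((u64)mss << I40E_TXD_CTX_QW1_MSS_SHIFT);

	return cpu_to_le64(qword);
}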
#define I40E_TXD_CTX_QW0_EXT_IP_SHIFT 0
#define I40E_TXD_CTX_QW0_EXT_IP_MASK (0x3ULL << \
I40E_TXD_CTX_QW0_EXT_IP_SHIFT)
enum i40e_tx_ctx_desc_eipt_offload {
I40E_TX_CTX_EXT_IP_NONE = 0x0,
I40E_TX_CTX_EXT_IP_IPV6 = 0x1,
I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2,
I40E_TX_CTX_EXT_IP_IPV4 = 0x3
};
#define I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT 2
#define I40E_TXD_CTX_QW0_EXT_IPLEN_MASK (0x3FULL << \
I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT)
#define I40E_TXD_CTX_QW0_NATT_SHIFT 9
#define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
#define I40E_TXD_CTX_UDP_TUNNELING (0x1ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
#define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11
#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK (0x1ULL << \
I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK
#define I40E_TXD_CTX_QW0_NATLEN_SHIFT 12
#define I40E_TXD_CTX_QW0_NATLEN_MASK (0x7FULL << \
I40E_TXD_CTX_QW0_NATLEN_SHIFT)
#define I40E_TXD_CTX_QW0_DECTTL_SHIFT 19
#define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \
I40E_TXD_CTX_QW0_DECTTL_SHIFT)
struct i40e_filter_program_desc {
__le32 qindex_flex_ptype_vsi;
__le32 rsvd;
__le32 dtype_cmd_cntindex;
__le32 fd_id;
};
#define I40E_TXD_FLTR_QW0_QINDEX_SHIFT 0
#define I40E_TXD_FLTR_QW0_QINDEX_MASK (0x7FFUL << \
I40E_TXD_FLTR_QW0_QINDEX_SHIFT)
#define I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT 11
#define I40E_TXD_FLTR_QW0_FLEXOFF_MASK (0x7UL << \
I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT)
#define I40E_TXD_FLTR_QW0_PCTYPE_SHIFT 17
#define I40E_TXD_FLTR_QW0_PCTYPE_MASK (0x3FUL << \
I40E_TXD_FLTR_QW0_PCTYPE_SHIFT)
/* Packet Classifier Types for filters */
enum i40e_filter_pctype {
/* Note: Values 0-28 are reserved for future use */
I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN = 32,
I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
/* Note: Values 37-38 are reserved for future use */
I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN = 42,
I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
I40E_FILTER_PCTYPE_FRAG_IPV6 = 46,
/* Note: Value 47 is reserved for future use */
I40E_FILTER_PCTYPE_FCOE_OX = 48,
I40E_FILTER_PCTYPE_FCOE_RX = 49,
I40E_FILTER_PCTYPE_FCOE_OTHER = 50,
/* Note: Values 51-62 are reserved for future use */
I40E_FILTER_PCTYPE_L2_PAYLOAD = 63,
};
enum i40e_filter_program_desc_dest {
I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET = 0x0,
I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX = 0x1,
I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER = 0x2,
};
enum i40e_filter_program_desc_fd_status {
I40E_FILTER_PROGRAM_DESC_FD_STATUS_NONE = 0x0,
I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID = 0x1,
I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID_4FLEX_BYTES = 0x2,
I40E_FILTER_PROGRAM_DESC_FD_STATUS_8FLEX_BYTES = 0x3,
};
#define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23
#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \
I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
#define I40E_TXD_FLTR_QW1_CMD_SHIFT 4
#define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \
I40E_TXD_FLTR_QW1_CMD_SHIFT)
#define I40E_TXD_FLTR_QW1_PCMD_SHIFT (0x0ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
#define I40E_TXD_FLTR_QW1_PCMD_MASK (0x7ULL << I40E_TXD_FLTR_QW1_PCMD_SHIFT)
enum i40e_filter_program_desc_pcmd {
I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE = 0x1,
I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE = 0x2,
};
#define I40E_TXD_FLTR_QW1_DEST_SHIFT (0x3ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
#define I40E_TXD_FLTR_QW1_DEST_MASK (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT)
#define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK (0x1ULL << \
I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
#define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \
I40E_TXD_FLTR_QW1_CMD_SHIFT)
#define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \
I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
#define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20
#define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \
I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
enum i40e_filter_type {
I40E_FLOW_DIRECTOR_FLTR = 0,
I40E_PE_QUAD_HASH_FLTR = 1,
I40E_ETHERTYPE_FLTR,
I40E_FCOE_CTX_FLTR,
I40E_MAC_VLAN_FLTR,
I40E_HASH_FLTR
};
struct i40e_vsi_context {
u16 seid;
u16 uplink_seid;
u16 vsi_number;
u16 vsis_allocated;
u16 vsis_unallocated;
u16 flags;
u8 pf_num;
u8 vf_num;
u8 connection_type;
struct i40e_aqc_vsi_properties_data info;
};
/* Statistics collected by each port, VSI, VEB, and S-channel */
struct i40e_eth_stats {
u64 rx_bytes; /* gorc */
u64 rx_unicast; /* uprc */
u64 rx_multicast; /* mprc */
u64 rx_broadcast; /* bprc */
u64 rx_discards; /* rdpc */
u64 rx_errors; /* repc */
u64 rx_missed; /* rmpc */
u64 rx_unknown_protocol; /* rupp */
u64 tx_bytes; /* gotc */
u64 tx_unicast; /* uptc */
u64 tx_multicast; /* mptc */
u64 tx_broadcast; /* bptc */
u64 tx_discards; /* tdpc */
u64 tx_errors; /* tepc */
};
/* Statistics collected by the MAC */
struct i40e_hw_port_stats {
/* eth stats collected by the port */
struct i40e_eth_stats eth;
/* additional port specific stats */
u64 tx_dropped_link_down; /* tdold */
u64 crc_errors; /* crcerrs */
u64 illegal_bytes; /* illerrc */
u64 error_bytes; /* errbc */
u64 mac_local_faults; /* mlfc */
u64 mac_remote_faults; /* mrfc */
u64 rx_length_errors; /* rlec */
u64 link_xon_rx; /* lxonrxc */
u64 link_xoff_rx; /* lxoffrxc */
u64 priority_xon_rx[8]; /* pxonrxc[8] */
u64 priority_xoff_rx[8]; /* pxoffrxc[8] */
u64 link_xon_tx; /* lxontxc */
u64 link_xoff_tx; /* lxofftxc */
u64 priority_xon_tx[8]; /* pxontxc[8] */
u64 priority_xoff_tx[8]; /* pxofftxc[8] */
u64 priority_xon_2_xoff[8]; /* pxon2offc[8] */
u64 rx_size_64; /* prc64 */
u64 rx_size_127; /* prc127 */
u64 rx_size_255; /* prc255 */
u64 rx_size_511; /* prc511 */
u64 rx_size_1023; /* prc1023 */
u64 rx_size_1522; /* prc1522 */
u64 rx_size_big; /* prc9522 */
u64 rx_undersize; /* ruc */
u64 rx_fragments; /* rfc */
u64 rx_oversize; /* roc */
u64 rx_jabber; /* rjc */
u64 tx_size_64; /* ptc64 */
u64 tx_size_127; /* ptc127 */
u64 tx_size_255; /* ptc255 */
u64 tx_size_511; /* ptc511 */
u64 tx_size_1023; /* ptc1023 */
u64 tx_size_1522; /* ptc1522 */
u64 tx_size_big; /* ptc9522 */
u64 mac_short_packet_dropped; /* mspdc */
u64 checksum_error; /* xec */
};
/* Checksum and Shadow RAM pointers */
#define I40E_SR_NVM_CONTROL_WORD 0x00
#define I40E_SR_EMP_MODULE_PTR 0x0F
#define I40E_SR_NVM_IMAGE_VERSION 0x18
#define I40E_SR_NVM_WAKE_ON_LAN 0x19
#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27
#define I40E_SR_NVM_EETRACK_LO 0x2D
#define I40E_SR_NVM_EETRACK_HI 0x2E
#define I40E_SR_VPD_PTR 0x2F
#define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E
#define I40E_SR_SW_CHECKSUM_WORD 0x3F
/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
#define I40E_SR_VPD_MODULE_MAX_SIZE 1024
#define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024
#define I40E_SR_CONTROL_WORD_1_SHIFT 0x06
#define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT)
/* Shadow RAM related */
#define I40E_SR_SECTOR_SIZE_IN_WORDS 0x800
#define I40E_SR_WORDS_IN_1KB 512
/* Checksum should be calculated such that after adding all the words,
* including the checksum word itself, the sum should be 0xBABA.
*/
#define I40E_SR_SW_CHECKSUM_BASE 0xBABA
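/* Illustrative sketch (not part of this patch): a simplified check of the
 * rule stated above.  sr_read_word() is a hypothetical accessor returning
 * one 16-bit Shadow RAM word; a production implementation may skip dedicated
 * modules, so treat this purely as a sketch of the idea.
 */
static inline bool example_sr_checksum_ok(u16 (*sr_read_word)(u16 offset),
					  u16 sr_size_words)
{
	u16 sum = 0;
	u16 i;

	/* Sum every word, including the checksum word stored at
	 * I40E_SR_SW_CHECKSUM_WORD; the 16-bit total must equal 0xBABA.
	 */
	for (i = 0; i < sr_size_words; i++)
		sum += sr_read_word(i);

	return sum == I40E_SR_SW_CHECKSUM_BASE;
}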
#define I40E_SRRD_SRCTL_ATTEMPTS 100000
enum i40e_switch_element_types {
I40E_SWITCH_ELEMENT_TYPE_MAC = 1,
I40E_SWITCH_ELEMENT_TYPE_PF = 2,
I40E_SWITCH_ELEMENT_TYPE_VF = 3,
I40E_SWITCH_ELEMENT_TYPE_EMP = 4,
I40E_SWITCH_ELEMENT_TYPE_BMC = 6,
I40E_SWITCH_ELEMENT_TYPE_PE = 16,
I40E_SWITCH_ELEMENT_TYPE_VEB = 17,
I40E_SWITCH_ELEMENT_TYPE_PA = 18,
I40E_SWITCH_ELEMENT_TYPE_VSI = 19,
};
/* Supported EtherType filters */
enum i40e_ether_type_index {
I40E_ETHER_TYPE_1588 = 0,
I40E_ETHER_TYPE_FIP = 1,
I40E_ETHER_TYPE_OUI_EXTENDED = 2,
I40E_ETHER_TYPE_MAC_CONTROL = 3,
I40E_ETHER_TYPE_LLDP = 4,
I40E_ETHER_TYPE_EVB_PROTOCOL1 = 5,
I40E_ETHER_TYPE_EVB_PROTOCOL2 = 6,
I40E_ETHER_TYPE_QCN_CNM = 7,
I40E_ETHER_TYPE_8021X = 8,
I40E_ETHER_TYPE_ARP = 9,
I40E_ETHER_TYPE_RSV1 = 10,
I40E_ETHER_TYPE_RSV2 = 11,
};
/* Filter context base size is 1K */
#define I40E_HASH_FILTER_BASE_SIZE 1024
/* Supported Hash filter values */
enum i40e_hash_filter_size {
I40E_HASH_FILTER_SIZE_1K = 0,
I40E_HASH_FILTER_SIZE_2K = 1,
I40E_HASH_FILTER_SIZE_4K = 2,
I40E_HASH_FILTER_SIZE_8K = 3,
I40E_HASH_FILTER_SIZE_16K = 4,
I40E_HASH_FILTER_SIZE_32K = 5,
I40E_HASH_FILTER_SIZE_64K = 6,
I40E_HASH_FILTER_SIZE_128K = 7,
I40E_HASH_FILTER_SIZE_256K = 8,
I40E_HASH_FILTER_SIZE_512K = 9,
I40E_HASH_FILTER_SIZE_1M = 10,
};
/* DMA context base size is 0.5K */
#define I40E_DMA_CNTX_BASE_SIZE 512
/* Supported DMA context values */
enum i40e_dma_cntx_size {
I40E_DMA_CNTX_SIZE_512 = 0,
I40E_DMA_CNTX_SIZE_1K = 1,
I40E_DMA_CNTX_SIZE_2K = 2,
I40E_DMA_CNTX_SIZE_4K = 3,
I40E_DMA_CNTX_SIZE_8K = 4,
I40E_DMA_CNTX_SIZE_16K = 5,
I40E_DMA_CNTX_SIZE_32K = 6,
I40E_DMA_CNTX_SIZE_64K = 7,
I40E_DMA_CNTX_SIZE_128K = 8,
I40E_DMA_CNTX_SIZE_256K = 9,
};
/* Supported Hash look up table (LUT) sizes */
enum i40e_hash_lut_size {
I40E_HASH_LUT_SIZE_128 = 0,
I40E_HASH_LUT_SIZE_512 = 1,
};
/* Structure to hold per-PF filter control settings */
struct i40e_filter_control_settings {
/* number of PE Quad Hash filter buckets */
enum i40e_hash_filter_size pe_filt_num;
/* number of PE Quad Hash contexts */
enum i40e_dma_cntx_size pe_cntx_num;
/* number of FCoE filter buckets */
enum i40e_hash_filter_size fcoe_filt_num;
/* number of FCoE DDP contexts */
enum i40e_dma_cntx_size fcoe_cntx_num;
/* size of the Hash LUT */
enum i40e_hash_lut_size hash_lut_size;
/* enable FDIR filters for PF and its VFs */
bool enable_fdir;
/* enable Ethertype filters for PF and its VFs */
bool enable_ethtype;
/* enable MAC/VLAN filters for PF and its VFs */
bool enable_macvlan;
};
/* Structure to hold device level control filter counts */
struct i40e_control_filter_stats {
u16 mac_etype_used; /* Used perfect match MAC/EtherType filters */
u16 etype_used; /* Used perfect EtherType filters */
u16 mac_etype_free; /* Un-used perfect match MAC/EtherType filters */
u16 etype_free; /* Un-used perfect EtherType filters */
};
enum i40e_reset_type {
I40E_RESET_POR = 0,
I40E_RESET_CORER = 1,
I40E_RESET_GLOBR = 2,
I40E_RESET_EMPR = 3,
};
#endif /* _I40E_TYPE_H_ */