Commit ca9c54d2 authored by Dexuan Cui, committed by David S. Miller

net: mana: Add a driver for Microsoft Azure Network Adapter (MANA)

Add a VF driver for Microsoft Azure Network Adapter (MANA) that will be
available in the future.
Co-developed-by: Haiyang Zhang <haiyangz@microsoft.com>
Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
Co-developed-by: Shachar Raindel <shacharr@microsoft.com>
Signed-off-by: Shachar Raindel <shacharr@microsoft.com>
Signed-off-by: Dexuan Cui <decui@microsoft.com>
Reviewed-by: Stephen Hemminger <stephen@networkplumber.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 83c1ca25
@@ -8267,11 +8267,12 @@ S: Maintained
T: git git://linuxtv.org/media_tree.git
F: drivers/media/i2c/hi556.c
Hyper-V CORE AND DRIVERS
Hyper-V/Azure CORE AND DRIVERS
M: "K. Y. Srinivasan" <kys@microsoft.com>
M: Haiyang Zhang <haiyangz@microsoft.com>
M: Stephen Hemminger <sthemmin@microsoft.com>
M: Wei Liu <wei.liu@kernel.org>
M: Dexuan Cui <decui@microsoft.com>
L: linux-hyperv@vger.kernel.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux.git
@@ -8288,6 +8289,7 @@ F: drivers/hid/hid-hyperv.c
F: drivers/hv/
F: drivers/input/serio/hyperv-keyboard.c
F: drivers/iommu/hyperv-iommu.c
F: drivers/net/ethernet/microsoft/
F: drivers/net/hyperv/
F: drivers/pci/controller/pci-hyperv-intf.c
F: drivers/pci/controller/pci-hyperv.c
@@ -82,6 +82,7 @@ source "drivers/net/ethernet/huawei/Kconfig"
source "drivers/net/ethernet/i825xx/Kconfig"
source "drivers/net/ethernet/ibm/Kconfig"
source "drivers/net/ethernet/intel/Kconfig"
source "drivers/net/ethernet/microsoft/Kconfig"
source "drivers/net/ethernet/xscale/Kconfig"
config JME
@@ -45,6 +45,7 @@ obj-$(CONFIG_NET_VENDOR_HUAWEI) += huawei/
obj-$(CONFIG_NET_VENDOR_IBM) += ibm/
obj-$(CONFIG_NET_VENDOR_INTEL) += intel/
obj-$(CONFIG_NET_VENDOR_I825XX) += i825xx/
obj-$(CONFIG_NET_VENDOR_MICROSOFT) += microsoft/
obj-$(CONFIG_NET_VENDOR_XSCALE) += xscale/
obj-$(CONFIG_JME) += jme.o
obj-$(CONFIG_KORINA) += korina.o
#
# Microsoft Azure network device configuration
#
config NET_VENDOR_MICROSOFT
bool "Microsoft Network Devices"
default y
help
If you have a network (Ethernet) device belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip the
question about Microsoft network devices. If you say Y, you will be
asked for your specific device in the following question.
if NET_VENDOR_MICROSOFT
config MICROSOFT_MANA
tristate "Microsoft Azure Network Adapter (MANA) support"
depends on PCI_MSI && X86_64
select PCI_HYPERV
help
This driver supports Microsoft Azure Network Adapter (MANA).
So far, the driver is only supported on X86_64.
To compile this driver as a module, choose M here.
The module will be called mana.
endif #NET_VENDOR_MICROSOFT
#
# Makefile for the Microsoft Azure network device driver.
#
obj-$(CONFIG_MICROSOFT_MANA) += mana/
# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
#
# Makefile for the Microsoft Azure Network Adapter driver
obj-$(CONFIG_MICROSOFT_MANA) += mana.o
mana-objs := gdma_main.o shm_channel.o hw_channel.o mana_en.o mana_ethtool.o
[3 collapsed file diffs not shown here]
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2021, Microsoft Corporation. */
#ifndef _HW_CHANNEL_H
#define _HW_CHANNEL_H
#define DEFAULT_LOG2_THROTTLING_FOR_ERROR_EQ 4
#define HW_CHANNEL_MAX_REQUEST_SIZE 0x1000
#define HW_CHANNEL_MAX_RESPONSE_SIZE 0x1000
#define HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH 1
#define HWC_INIT_DATA_CQID 1
#define HWC_INIT_DATA_RQID 2
#define HWC_INIT_DATA_SQID 3
#define HWC_INIT_DATA_QUEUE_DEPTH 4
#define HWC_INIT_DATA_MAX_REQUEST 5
#define HWC_INIT_DATA_MAX_RESPONSE 6
#define HWC_INIT_DATA_MAX_NUM_CQS 7
#define HWC_INIT_DATA_PDID 8
#define HWC_INIT_DATA_GPA_MKEY 9
/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
* them are naturally aligned and hence don't need __packed.
*/
union hwc_init_eq_id_db {
u32 as_uint32;
struct {
u32 eq_id : 16;
u32 doorbell : 16;
};
}; /* HW DATA */
union hwc_init_type_data {
u32 as_uint32;
struct {
u32 value : 24;
u32 type : 8;
};
}; /* HW DATA */
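/* Illustrative sketch (not part of the original patch): how a 32-bit
 * initialization data word splits into an HWC_INIT_DATA_* type and its
 * 24-bit value via hwc_init_type_data. The real decoding is in the
 * collapsed hw_channel.c diff.
 */
static inline void hwc_decode_init_data_sketch(u32 word, u32 *type, u32 *value)
{
	union hwc_init_type_data d;

	d.as_uint32 = word;
	*type = d.type;		/* e.g. HWC_INIT_DATA_CQID or HWC_INIT_DATA_QUEUE_DEPTH */
	*value = d.value;	/* the 24-bit payload for that type */
}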
struct hwc_rx_oob {
u32 type : 6;
u32 eom : 1;
u32 som : 1;
u32 vendor_err : 8;
u32 reserved1 : 16;
u32 src_virt_wq : 24;
u32 src_vfid : 8;
u32 reserved2;
union {
u32 wqe_addr_low;
u32 wqe_offset;
};
u32 wqe_addr_high;
u32 client_data_unit : 14;
u32 reserved3 : 18;
u32 tx_oob_data_size;
u32 chunk_offset : 21;
u32 reserved4 : 11;
}; /* HW DATA */
struct hwc_tx_oob {
u32 reserved1;
u32 reserved2;
u32 vrq_id : 24;
u32 dest_vfid : 8;
u32 vrcq_id : 24;
u32 reserved3 : 8;
u32 vscq_id : 24;
u32 loopback : 1;
u32 lso_override: 1;
u32 dest_pf : 1;
u32 reserved4 : 5;
u32 vsq_id : 24;
u32 reserved5 : 8;
}; /* HW DATA */
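/* Illustrative sketch (not in the original patch): because the HW DATA
 * structures are naturally aligned, their sizes are fixed at compile time.
 * The byte counts below follow from the bitfield widths above (hwc_rx_oob
 * packs into eight u32s, hwc_tx_oob into six); assumes <linux/build_bug.h>
 * is reachable for static_assert().
 */
static_assert(sizeof(struct hwc_rx_oob) == 32);
static_assert(sizeof(struct hwc_tx_oob) == 24);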
struct hwc_work_request {
void *buf_va;
void *buf_sge_addr;
u32 buf_len;
u32 msg_size;
struct gdma_wqe_request wqe_req;
struct hwc_tx_oob tx_oob;
struct gdma_sge sge;
};
/* hwc_dma_buf represents the array of in-flight WQEs.
mem_info, also known as the GDMA mapped memory, is partitioned and used by
the in-flight WQEs.
* The number of WQEs is determined by the number of in-flight messages.
*/
struct hwc_dma_buf {
struct gdma_mem_info mem_info;
u32 gpa_mkey;
u32 num_reqs;
struct hwc_work_request reqs[];
};
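/* Illustrative sketch (not in the original patch): once the GDMA memory has
 * been mapped (the real allocation is in the collapsed hw_channel.c diff),
 * the region can be carved into one fixed-size slot per in-flight WQE.
 * The parameter names mapped_base/q_depth/max_msg_size are illustrative.
 */
static inline void hwc_dma_buf_partition_sketch(struct hwc_dma_buf *dma_buf,
						void *mapped_base,
						u16 q_depth, u32 max_msg_size)
{
	u16 i;

	dma_buf->num_reqs = q_depth;

	for (i = 0; i < q_depth; i++) {
		dma_buf->reqs[i].buf_len = max_msg_size;
		dma_buf->reqs[i].buf_va = (u8 *)mapped_base + i * max_msg_size;
	}
}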
typedef void hwc_rx_event_handler_t(void *ctx, u32 gdma_rxq_id,
const struct hwc_rx_oob *rx_oob);
typedef void hwc_tx_event_handler_t(void *ctx, u32 gdma_txq_id,
const struct hwc_rx_oob *rx_oob);
struct hwc_cq {
struct hw_channel_context *hwc;
struct gdma_queue *gdma_cq;
struct gdma_queue *gdma_eq;
struct gdma_comp *comp_buf;
u16 queue_depth;
hwc_rx_event_handler_t *rx_event_handler;
void *rx_event_ctx;
hwc_tx_event_handler_t *tx_event_handler;
void *tx_event_ctx;
};
struct hwc_wq {
struct hw_channel_context *hwc;
struct gdma_queue *gdma_wq;
struct hwc_dma_buf *msg_buf;
u16 queue_depth;
struct hwc_cq *hwc_cq;
};
struct hwc_caller_ctx {
struct completion comp_event;
void *output_buf;
u32 output_buflen;
u32 error; /* Linux error code */
u32 status_code;
};
struct hw_channel_context {
struct gdma_dev *gdma_dev;
struct device *dev;
u16 num_inflight_msg;
u32 max_req_msg_size;
u16 hwc_init_q_depth_max;
u32 hwc_init_max_req_msg_size;
u32 hwc_init_max_resp_msg_size;
struct completion hwc_init_eqe_comp;
struct hwc_wq *rxq;
struct hwc_wq *txq;
struct hwc_cq *cq;
struct semaphore sema;
struct gdma_resource inflight_msg_res;
struct hwc_caller_ctx *caller_ctx;
};
int mana_hwc_create_channel(struct gdma_context *gc);
void mana_hwc_destroy_channel(struct gdma_context *gc);
int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
const void *req, u32 resp_len, void *resp);
#endif /* _HW_CHANNEL_H */
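/* Illustrative usage sketch (not in the original patch): a caller issues a
 * management command over the HW channel and blocks until the response (or an
 * error) arrives. The real request/response message formats are defined in
 * the collapsed diffs; my_req/my_resp below are hypothetical placeholders.
 *
 *	struct my_req req = {};
 *	struct my_resp resp = {};
 *	int err;
 *
 *	err = mana_hwc_send_request(hwc, sizeof(req), &req,
 *				    sizeof(resp), &resp);
 *	if (err)
 *		dev_err(hwc->dev, "HWC request failed: %d\n", err);
 */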
[2 collapsed file diffs not shown here]
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include "mana.h"
static const struct {
char name[ETH_GSTRING_LEN];
u16 offset;
} mana_eth_stats[] = {
{"stop_queue", offsetof(struct mana_ethtool_stats, stop_queue)},
{"wake_queue", offsetof(struct mana_ethtool_stats, wake_queue)},
};
static int mana_get_sset_count(struct net_device *ndev, int stringset)
{
struct mana_port_context *apc = netdev_priv(ndev);
unsigned int num_queues = apc->num_queues;
if (stringset != ETH_SS_STATS)
return -EINVAL;
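	/* mana_get_strings() emits four strings per queue:
	 * rx packets/bytes and tx packets/bytes.
	 */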
return ARRAY_SIZE(mana_eth_stats) + num_queues * 4;
}
static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
struct mana_port_context *apc = netdev_priv(ndev);
unsigned int num_queues = apc->num_queues;
u8 *p = data;
int i;
if (stringset != ETH_SS_STATS)
return;
for (i = 0; i < ARRAY_SIZE(mana_eth_stats); i++) {
memcpy(p, mana_eth_stats[i].name, ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
for (i = 0; i < num_queues; i++) {
sprintf(p, "rx_%d_packets", i);
p += ETH_GSTRING_LEN;
sprintf(p, "rx_%d_bytes", i);
p += ETH_GSTRING_LEN;
}
for (i = 0; i < num_queues; i++) {
sprintf(p, "tx_%d_packets", i);
p += ETH_GSTRING_LEN;
sprintf(p, "tx_%d_bytes", i);
p += ETH_GSTRING_LEN;
}
}
static void mana_get_ethtool_stats(struct net_device *ndev,
struct ethtool_stats *e_stats, u64 *data)
{
struct mana_port_context *apc = netdev_priv(ndev);
unsigned int num_queues = apc->num_queues;
void *eth_stats = &apc->eth_stats;
struct mana_stats *stats;
unsigned int start;
u64 packets, bytes;
int q, i = 0;
if (!apc->port_is_up)
return;
for (q = 0; q < ARRAY_SIZE(mana_eth_stats); q++)
data[i++] = *(u64 *)(eth_stats + mana_eth_stats[q].offset);
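	/* Per-queue counters: read a consistent snapshot under the
	 * u64_stats seqcount.
	 */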
for (q = 0; q < num_queues; q++) {
stats = &apc->rxqs[q]->stats;
do {
start = u64_stats_fetch_begin_irq(&stats->syncp);
packets = stats->packets;
bytes = stats->bytes;
} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
data[i++] = packets;
data[i++] = bytes;
}
for (q = 0; q < num_queues; q++) {
stats = &apc->tx_qp[q].txq.stats;
do {
start = u64_stats_fetch_begin_irq(&stats->syncp);
packets = stats->packets;
bytes = stats->bytes;
} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
data[i++] = packets;
data[i++] = bytes;
}
}
static int mana_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *cmd,
u32 *rules)
{
struct mana_port_context *apc = netdev_priv(ndev);
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
cmd->data = apc->num_queues;
return 0;
}
return -EOPNOTSUPP;
}
static u32 mana_get_rxfh_key_size(struct net_device *ndev)
{
return MANA_HASH_KEY_SIZE;
}
static u32 mana_rss_indir_size(struct net_device *ndev)
{
return MANA_INDIRECT_TABLE_SIZE;
}
static int mana_get_rxfh(struct net_device *ndev, u32 *indir, u8 *key,
u8 *hfunc)
{
struct mana_port_context *apc = netdev_priv(ndev);
int i;
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
if (indir) {
for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
indir[i] = apc->indir_table[i];
}
if (key)
memcpy(key, apc->hashkey, MANA_HASH_KEY_SIZE);
return 0;
}
static int mana_set_rxfh(struct net_device *ndev, const u32 *indir,
const u8 *key, const u8 hfunc)
{
struct mana_port_context *apc = netdev_priv(ndev);
bool update_hash = false, update_table = false;
u32 save_table[MANA_INDIRECT_TABLE_SIZE];
u8 save_key[MANA_HASH_KEY_SIZE];
int i, err;
if (!apc->port_is_up)
return -EOPNOTSUPP;
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
if (indir) {
for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
if (indir[i] >= apc->num_queues)
return -EINVAL;
update_table = true;
for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
save_table[i] = apc->indir_table[i];
apc->indir_table[i] = indir[i];
}
}
if (key) {
update_hash = true;
memcpy(save_key, apc->hashkey, MANA_HASH_KEY_SIZE);
memcpy(apc->hashkey, key, MANA_HASH_KEY_SIZE);
}
err = mana_config_rss(apc, TRI_STATE_TRUE, update_hash, update_table);
if (err) { /* recover to original values */
if (update_table) {
for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
apc->indir_table[i] = save_table[i];
}
if (update_hash)
memcpy(apc->hashkey, save_key, MANA_HASH_KEY_SIZE);
mana_config_rss(apc, TRI_STATE_TRUE, update_hash, update_table);
}
return err;
}
static void mana_get_channels(struct net_device *ndev,
struct ethtool_channels *channel)
{
struct mana_port_context *apc = netdev_priv(ndev);
channel->max_combined = apc->max_queues;
channel->combined_count = apc->num_queues;
}
static int mana_set_channels(struct net_device *ndev,
struct ethtool_channels *channels)
{
struct mana_port_context *apc = netdev_priv(ndev);
unsigned int new_count = channels->combined_count;
unsigned int old_count = apc->num_queues;
int err, err2;
if (!apc->port_is_up)
return -EOPNOTSUPP;
err = mana_detach(ndev, false);
if (err) {
netdev_err(ndev, "mana_detach failed: %d\n", err);
return err;
}
apc->num_queues = new_count;
err = mana_attach(ndev);
if (!err)
return 0;
netdev_err(ndev, "mana_attach failed: %d\n", err);
/* Try to roll it back to the old configuration. */
apc->num_queues = old_count;
err2 = mana_attach(ndev);
if (err2)
netdev_err(ndev, "mana re-attach failed: %d\n", err2);
return err;
}
const struct ethtool_ops mana_ethtool_ops = {
.get_ethtool_stats = mana_get_ethtool_stats,
.get_sset_count = mana_get_sset_count,
.get_strings = mana_get_strings,
.get_rxnfc = mana_get_rxnfc,
.get_rxfh_key_size = mana_get_rxfh_key_size,
.get_rxfh_indir_size = mana_rss_indir_size,
.get_rxfh = mana_get_rxfh,
.set_rxfh = mana_set_rxfh,
.get_channels = mana_get_channels,
.set_channels = mana_set_channels,
};
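/* Note (not in the original patch): these ops service the standard ethtool
 * interfaces, e.g. "ethtool -S" (driver and per-queue stats), "ethtool -x/-X"
 * (RSS hash key and indirection table) and "ethtool -l/-L" (combined channel
 * count).
 */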
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/mm.h>
#include "shm_channel.h"
#define PAGE_FRAME_L48_WIDTH_BYTES 6
#define PAGE_FRAME_L48_WIDTH_BITS (PAGE_FRAME_L48_WIDTH_BYTES * 8)
#define PAGE_FRAME_L48_MASK 0x0000FFFFFFFFFFFF
#define PAGE_FRAME_H4_WIDTH_BITS 4
#define VECTOR_MASK 0xFFFF
#define SHMEM_VF_RESET_STATE ((u32)-1)
#define SMC_MSG_TYPE_ESTABLISH_HWC 1
#define SMC_MSG_TYPE_ESTABLISH_HWC_VERSION 0
#define SMC_MSG_TYPE_DESTROY_HWC 2
#define SMC_MSG_TYPE_DESTROY_HWC_VERSION 0
#define SMC_MSG_DIRECTION_REQUEST 0
#define SMC_MSG_DIRECTION_RESPONSE 1
/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
* them are naturally aligned and hence don't need __packed.
*/
/* Shared memory channel protocol header
*
* msg_type: set on request and response; response matches request.
msg_version: a newer PF writes back the older (request-matching) version in
the response; an older PF acts on the latest version it knows and sets that
version (less than the request's) in the result.
* direction: 0 for request, VF->PF; 1 for response, PF->VF.
* status: 0 on request,
* operation result on response (success = 0, failure = 1 or greater).
reset_vf: if set on either an establish or a destroy request, indicates that
an FLR is performed before/after the operation.
* owner_is_pf: 1 indicates PF owned, 0 indicates VF owned.
*/
union smc_proto_hdr {
u32 as_uint32;
struct {
u8 msg_type : 3;
u8 msg_version : 3;
u8 reserved_1 : 1;
u8 direction : 1;
u8 status;
u8 reserved_2;
u8 reset_vf : 1;
u8 reserved_3 : 6;
u8 owner_is_pf : 1;
};
}; /* HW DATA */
#define SMC_APERTURE_BITS 256
#define SMC_BASIC_UNIT (sizeof(u32))
#define SMC_APERTURE_DWORDS (SMC_APERTURE_BITS / (SMC_BASIC_UNIT * 8))
#define SMC_LAST_DWORD (SMC_APERTURE_DWORDS - 1)
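/* The aperture is eight 32-bit dwords (256 bits); ownership is signalled by
 * bit 31 of the last dword (see mana_smc_poll_register() below).
 */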
static int mana_smc_poll_register(void __iomem *base, bool reset)
{
void __iomem *ptr = base + SMC_LAST_DWORD * SMC_BASIC_UNIT;
u32 last_dword;
int i;
/* Poll the hardware for the ownership bit. This should be pretty fast,
* but let's do it in a loop just in case the hardware or the PF
driver is temporarily busy.
*/
for (i = 0; i < 20 * 1000; i++) {
last_dword = readl(ptr);
/* shmem reads as 0xFFFFFFFF in the reset case */
if (reset && last_dword == SHMEM_VF_RESET_STATE)
return 0;
/* If bit_31 is set, the PF currently owns the SMC. */
if (!(last_dword & BIT(31)))
return 0;
usleep_range(1000, 2000);
}
return -ETIMEDOUT;
}
static int mana_smc_read_response(struct shm_channel *sc, u32 msg_type,
u32 msg_version, bool reset_vf)
{
void __iomem *base = sc->base;
union smc_proto_hdr hdr;
int err;
/* Wait for PF to respond. */
err = mana_smc_poll_register(base, reset_vf);
if (err)
return err;
hdr.as_uint32 = readl(base + SMC_LAST_DWORD * SMC_BASIC_UNIT);
if (reset_vf && hdr.as_uint32 == SHMEM_VF_RESET_STATE)
return 0;
/* Validate protocol fields from the PF driver */
if (hdr.msg_type != msg_type || hdr.msg_version > msg_version ||
hdr.direction != SMC_MSG_DIRECTION_RESPONSE) {
dev_err(sc->dev, "Wrong SMC response 0x%x, type=%d, ver=%d\n",
hdr.as_uint32, msg_type, msg_version);
return -EPROTO;
}
/* Validate the operation result */
if (hdr.status != 0) {
dev_err(sc->dev, "SMC operation failed: 0x%x\n", hdr.status);
return -EPROTO;
}
return 0;
}
void mana_smc_init(struct shm_channel *sc, struct device *dev,
void __iomem *base)
{
sc->dev = dev;
sc->base = base;
}
int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr,
u64 cq_addr, u64 rq_addr, u64 sq_addr,
u32 eq_msix_index)
{
union smc_proto_hdr *hdr;
u16 all_addr_h4bits = 0;
u16 frame_addr_seq = 0;
u64 frame_addr = 0;
u8 shm_buf[32];
u64 *shmem;
u32 *dword;
u8 *ptr;
int err;
int i;
/* Ensure VF already has possession of shared memory */
err = mana_smc_poll_register(sc->base, false);
if (err) {
dev_err(sc->dev, "Timeout when setting up HWC: %d\n", err);
return err;
}
if (!PAGE_ALIGNED(eq_addr) || !PAGE_ALIGNED(cq_addr) ||
!PAGE_ALIGNED(rq_addr) || !PAGE_ALIGNED(sq_addr))
return -EINVAL;
if ((eq_msix_index & VECTOR_MASK) != eq_msix_index)
return -EINVAL;
/* Scheme for packing four addresses and extra info into 256 bits.
*
* Addresses must be page frame aligned, so only frame address bits
* are transferred.
*
52-bit frame addresses are split into the lower 48 bits and the upper
4 bits. The lower 48 bits of the four addresses are written sequentially
from the start of the 256-bit shared memory region, followed by 16 bits
containing the upper 4 bits of the four addresses in sequence.
*
* A 16 bit EQ vector number fills out the next-to-last 32-bit dword.
*
* The final 32-bit dword is used for protocol control information as
* defined in smc_proto_hdr.
*/
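	/* Resulting shm_buf layout (32 bytes):
	 *   bytes  0-23: lower 48 bits of the EQ, CQ, RQ and SQ frame numbers
	 *   bytes 24-25: upper 4 bits of each of the four frame numbers
	 *   bytes 26-27: EQ MSI-X vector number
	 *   bytes 28-31: smc_proto_hdr
	 */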
memset(shm_buf, 0, sizeof(shm_buf));
ptr = shm_buf;
/* EQ addr: low 48 bits of frame address */
shmem = (u64 *)ptr;
frame_addr = PHYS_PFN(eq_addr);
*shmem = frame_addr & PAGE_FRAME_L48_MASK;
all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
(frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
ptr += PAGE_FRAME_L48_WIDTH_BYTES;
/* CQ addr: low 48 bits of frame address */
shmem = (u64 *)ptr;
frame_addr = PHYS_PFN(cq_addr);
*shmem = frame_addr & PAGE_FRAME_L48_MASK;
all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
(frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
ptr += PAGE_FRAME_L48_WIDTH_BYTES;
/* RQ addr: low 48 bits of frame address */
shmem = (u64 *)ptr;
frame_addr = PHYS_PFN(rq_addr);
*shmem = frame_addr & PAGE_FRAME_L48_MASK;
all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
(frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
ptr += PAGE_FRAME_L48_WIDTH_BYTES;
/* SQ addr: low 48 bits of frame address */
shmem = (u64 *)ptr;
frame_addr = PHYS_PFN(sq_addr);
*shmem = frame_addr & PAGE_FRAME_L48_MASK;
all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
(frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
ptr += PAGE_FRAME_L48_WIDTH_BYTES;
/* High 4 bits of the four frame addresses */
*((u16 *)ptr) = all_addr_h4bits;
ptr += sizeof(u16);
/* EQ MSIX vector number */
*((u16 *)ptr) = (u16)eq_msix_index;
ptr += sizeof(u16);
/* 32-bit protocol header in final dword */
*((u32 *)ptr) = 0;
hdr = (union smc_proto_hdr *)ptr;
hdr->msg_type = SMC_MSG_TYPE_ESTABLISH_HWC;
hdr->msg_version = SMC_MSG_TYPE_ESTABLISH_HWC_VERSION;
hdr->direction = SMC_MSG_DIRECTION_REQUEST;
hdr->reset_vf = reset_vf;
/* Write the 256-bit message buffer to shared memory (the final 32-bit write
* triggers HW to set the possession bit to PF).
*/
dword = (u32 *)shm_buf;
for (i = 0; i < SMC_APERTURE_DWORDS; i++)
writel(*dword++, sc->base + i * SMC_BASIC_UNIT);
/* Read shmem response (polling for VF possession) and validate.
For setup, waiting for the response on shared memory is not strictly
necessary, since the wait occurs later for results to appear in EQEs.
*/
err = mana_smc_read_response(sc, SMC_MSG_TYPE_ESTABLISH_HWC,
SMC_MSG_TYPE_ESTABLISH_HWC_VERSION,
reset_vf);
if (err) {
dev_err(sc->dev, "Error when setting up HWC: %d\n", err);
return err;
}
return 0;
}
int mana_smc_teardown_hwc(struct shm_channel *sc, bool reset_vf)
{
union smc_proto_hdr hdr = {};
int err;
/* Ensure the VF already has possession of shared memory */
err = mana_smc_poll_register(sc->base, false);
if (err) {
dev_err(sc->dev, "Timeout when tearing down HWC\n");
return err;
}
/* Set up protocol header for HWC destroy message */
hdr.msg_type = SMC_MSG_TYPE_DESTROY_HWC;
hdr.msg_version = SMC_MSG_TYPE_DESTROY_HWC_VERSION;
hdr.direction = SMC_MSG_DIRECTION_REQUEST;
hdr.reset_vf = reset_vf;
/* Write message in high 32 bits of 256-bit shared memory, causing HW
* to set possession bit to PF.
*/
writel(hdr.as_uint32, sc->base + SMC_LAST_DWORD * SMC_BASIC_UNIT);
/* Read shmem response (polling for VF possession) and validate.
* For teardown, waiting for response is required to ensure hardware
* invalidates MST entries before software frees memory.
*/
err = mana_smc_read_response(sc, SMC_MSG_TYPE_DESTROY_HWC,
SMC_MSG_TYPE_DESTROY_HWC_VERSION,
reset_vf);
if (err) {
dev_err(sc->dev, "Error when tearing down HWC: %d\n", err);
return err;
}
return 0;
}
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2021, Microsoft Corporation. */
#ifndef _SHM_CHANNEL_H
#define _SHM_CHANNEL_H
struct shm_channel {
struct device *dev;
void __iomem *base;
};
void mana_smc_init(struct shm_channel *sc, struct device *dev,
void __iomem *base);
int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr,
u64 cq_addr, u64 rq_addr, u64 sq_addr,
u32 eq_msix_index);
int mana_smc_teardown_hwc(struct shm_channel *sc, bool reset_vf);
#endif /* _SHM_CHANNEL_H */
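/* Illustrative usage sketch (not in the original patch): the GDMA core (in
 * the collapsed gdma_main.c diff) is the real caller; this only shows the
 * intended call order. The queue addresses must be page-aligned and
 * eq_msix_index must fit in 16 bits, per the checks in mana_smc_setup_hwc().
 *
 *	struct shm_channel sc;
 *	int err;
 *
 *	mana_smc_init(&sc, dev, shmem_base);
 *
 *	err = mana_smc_setup_hwc(&sc, false, eq_addr, cq_addr, rq_addr,
 *				 sq_addr, eq_msix_index);
 *	if (err)
 *		return err;
 *	...
 *	err = mana_smc_teardown_hwc(&sc, false);
 */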