Commit c6e03dbe authored by David S. Miller

Merge branch 'mana-misc'

Dexuan Cui says:

====================
net: mana: some misc patches

Patch 1 is a small fix.

Patch 2 reports OS info to the PF driver.
Before the patch, the req fields were all zeros.

Patch 3 fixes and cleans up the error handling of HWC creation failure.

Patch 4 adds the callbacks for hibernation/kexec. It's based on patch 3.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 986d2e3d 635096a8
......@@ -3,6 +3,8 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/version.h>
#include "mana.h"
......@@ -848,6 +850,15 @@ int mana_gd_verify_vf_version(struct pci_dev *pdev)
req.gd_drv_cap_flags3 = GDMA_DRV_CAP_FLAGS3;
req.gd_drv_cap_flags4 = GDMA_DRV_CAP_FLAGS4;
req.drv_ver = 0; /* Unused*/
req.os_type = 0x10; /* Linux */
req.os_ver_major = LINUX_VERSION_MAJOR;
req.os_ver_minor = LINUX_VERSION_PATCHLEVEL;
req.os_ver_build = LINUX_VERSION_SUBLEVEL;
strscpy(req.os_ver_str1, utsname()->sysname, sizeof(req.os_ver_str1));
strscpy(req.os_ver_str2, utsname()->release, sizeof(req.os_ver_str2));
strscpy(req.os_ver_str3, utsname()->version, sizeof(req.os_ver_str3));
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
if (err || resp.hdr.status) {
dev_err(gc->dev, "VfVerifyVersionOutput: %d, status=0x%x\n",
......@@ -1247,6 +1258,52 @@ static void mana_gd_remove_irqs(struct pci_dev *pdev)
gc->irq_contexts = NULL;
}
/* Bring up the GDMA device: registers, shared-memory channel, IRQs, the
 * HW channel, and the child devices behind it. On failure, everything
 * acquired so far is released in reverse order.
 */
static int mana_gd_setup(struct pci_dev *pdev)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);
	int err;

	mana_gd_init_registers(pdev);
	mana_smc_init(&gc->shm_channel, gc->dev, gc->shm_base);

	err = mana_gd_setup_irqs(pdev);
	if (err)
		return err;

	err = mana_hwc_create_channel(gc);
	if (err) {
		mana_gd_remove_irqs(pdev);
		return err;
	}

	/* The remaining steps all go through the HWC; if any of them fails,
	 * tear down the HWC and the IRQs before reporting the error.
	 */
	err = mana_gd_verify_vf_version(pdev);
	if (!err)
		err = mana_gd_query_max_resources(pdev);
	if (!err)
		err = mana_gd_detect_devices(pdev);

	if (err) {
		mana_hwc_destroy_channel(gc);
		mana_gd_remove_irqs(pdev);
	}

	return err;
}
/* Undo mana_gd_setup(): destroy the HW channel first, then the IRQs. */
static void mana_gd_cleanup(struct pci_dev *pdev)
{
	mana_hwc_destroy_channel(pci_get_drvdata(pdev));
	mana_gd_remove_irqs(pdev);
}
static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct gdma_context *gc;
......@@ -1276,6 +1333,9 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!gc)
goto release_region;
mutex_init(&gc->eq_test_event_mutex);
pci_set_drvdata(pdev, gc);
bar0_va = pci_iomap(pdev, bar, 0);
if (!bar0_va)
goto free_gc;
......@@ -1283,49 +1343,23 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
gc->bar0_va = bar0_va;
gc->dev = &pdev->dev;
pci_set_drvdata(pdev, gc);
mana_gd_init_registers(pdev);
mana_smc_init(&gc->shm_channel, gc->dev, gc->shm_base);
err = mana_gd_setup_irqs(pdev);
err = mana_gd_setup(pdev);
if (err)
goto unmap_bar;
mutex_init(&gc->eq_test_event_mutex);
err = mana_hwc_create_channel(gc);
if (err)
goto remove_irq;
err = mana_gd_verify_vf_version(pdev);
if (err)
goto remove_irq;
err = mana_gd_query_max_resources(pdev);
if (err)
goto remove_irq;
err = mana_gd_detect_devices(pdev);
err = mana_probe(&gc->mana, false);
if (err)
goto remove_irq;
err = mana_probe(&gc->mana);
if (err)
goto clean_up_gdma;
goto cleanup_gd;
return 0;
clean_up_gdma:
mana_hwc_destroy_channel(gc);
vfree(gc->cq_table);
gc->cq_table = NULL;
remove_irq:
mana_gd_remove_irqs(pdev);
cleanup_gd:
mana_gd_cleanup(pdev);
unmap_bar:
pci_iounmap(pdev, bar0_va);
free_gc:
pci_set_drvdata(pdev, NULL);
vfree(gc);
release_region:
pci_release_regions(pdev);
......@@ -1340,13 +1374,9 @@ static void mana_gd_remove(struct pci_dev *pdev)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
mana_remove(&gc->mana);
mana_remove(&gc->mana, false);
mana_hwc_destroy_channel(gc);
vfree(gc->cq_table);
gc->cq_table = NULL;
mana_gd_remove_irqs(pdev);
mana_gd_cleanup(pdev);
pci_iounmap(pdev, gc->bar0_va);
......@@ -1357,6 +1387,52 @@ static void mana_gd_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
/* Legacy PM suspend hook; the 'state' argument is ignored. */
static int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);

	/* Detach the ethernet devices (they stay registered), then tear
	 * down the GDMA layer.
	 */
	mana_remove(&gc->mana, true);
	mana_gd_cleanup(pdev);

	return 0;
}
/* Redo the setup that mana_gd_suspend() tore down. If the NIC hardware has
 * stopped working these steps fail too; in that case it is safer to simply
 * report the error than to try to undo what has already been done.
 */
static int mana_gd_resume(struct pci_dev *pdev)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);
	int err;

	err = mana_gd_setup(pdev);
	if (!err)
		err = mana_probe(&gc->mana, true);

	return err;
}
/* Quiesce the device for kexec. This is also called upon reboot/shutdown. */
static void mana_gd_shutdown(struct pci_dev *pdev)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);

	/* Fix typo in the log message: "calledd" -> "called". */
	dev_info(&pdev->dev, "Shutdown was called\n");

	/* Same teardown as suspend, then disable the PCI device. */
	mana_remove(&gc->mana, true);

	mana_gd_cleanup(pdev);

	pci_disable_device(pdev);
}
#ifndef PCI_VENDOR_ID_MICROSOFT
#define PCI_VENDOR_ID_MICROSOFT 0x1414
#endif
......@@ -1371,6 +1447,9 @@ static struct pci_driver mana_driver = {
.id_table = mana_id_table,
.probe = mana_gd_probe,
.remove = mana_gd_remove,
.suspend = mana_gd_suspend,
.resume = mana_gd_resume,
.shutdown = mana_gd_shutdown,
};
module_pci_driver(mana_driver);
......
......@@ -309,9 +309,6 @@ static void mana_hwc_comp_event(void *ctx, struct gdma_queue *q_self)
static void mana_hwc_destroy_cq(struct gdma_context *gc, struct hwc_cq *hwc_cq)
{
if (!hwc_cq)
return;
kfree(hwc_cq->comp_buf);
if (hwc_cq->gdma_cq)
......@@ -446,9 +443,6 @@ static void mana_hwc_dealloc_dma_buf(struct hw_channel_context *hwc,
static void mana_hwc_destroy_wq(struct hw_channel_context *hwc,
struct hwc_wq *hwc_wq)
{
if (!hwc_wq)
return;
mana_hwc_dealloc_dma_buf(hwc, hwc_wq->msg_buf);
if (hwc_wq->gdma_wq)
......@@ -621,6 +615,7 @@ static int mana_hwc_establish_channel(struct gdma_context *gc, u16 *q_depth,
*max_req_msg_size = hwc->hwc_init_max_req_msg_size;
*max_resp_msg_size = hwc->hwc_init_max_resp_msg_size;
/* Both were set in mana_hwc_init_event_handler(). */
if (WARN_ON(cq->id >= gc->max_num_cqs))
return -EPROTO;
......@@ -636,9 +631,6 @@ static int mana_hwc_establish_channel(struct gdma_context *gc, u16 *q_depth,
static int mana_hwc_init_queues(struct hw_channel_context *hwc, u16 q_depth,
u32 max_req_msg_size, u32 max_resp_msg_size)
{
struct hwc_wq *hwc_rxq = NULL;
struct hwc_wq *hwc_txq = NULL;
struct hwc_cq *hwc_cq = NULL;
int err;
err = mana_hwc_init_inflight_msg(hwc, q_depth);
......@@ -651,44 +643,32 @@ static int mana_hwc_init_queues(struct hw_channel_context *hwc, u16 q_depth,
err = mana_hwc_create_cq(hwc, q_depth * 2,
mana_hwc_init_event_handler, hwc,
mana_hwc_rx_event_handler, hwc,
mana_hwc_tx_event_handler, hwc, &hwc_cq);
mana_hwc_tx_event_handler, hwc, &hwc->cq);
if (err) {
dev_err(hwc->dev, "Failed to create HWC CQ: %d\n", err);
goto out;
}
hwc->cq = hwc_cq;
err = mana_hwc_create_wq(hwc, GDMA_RQ, q_depth, max_req_msg_size,
hwc_cq, &hwc_rxq);
hwc->cq, &hwc->rxq);
if (err) {
dev_err(hwc->dev, "Failed to create HWC RQ: %d\n", err);
goto out;
}
hwc->rxq = hwc_rxq;
err = mana_hwc_create_wq(hwc, GDMA_SQ, q_depth, max_resp_msg_size,
hwc_cq, &hwc_txq);
hwc->cq, &hwc->txq);
if (err) {
dev_err(hwc->dev, "Failed to create HWC SQ: %d\n", err);
goto out;
}
hwc->txq = hwc_txq;
hwc->num_inflight_msg = q_depth;
hwc->max_req_msg_size = max_req_msg_size;
return 0;
out:
if (hwc_txq)
mana_hwc_destroy_wq(hwc, hwc_txq);
if (hwc_rxq)
mana_hwc_destroy_wq(hwc, hwc_rxq);
if (hwc_cq)
mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc_cq);
mana_gd_free_res_map(&hwc->inflight_msg_res);
/* mana_hwc_create_channel() will do the cleanup.*/
return err;
}
......@@ -716,6 +696,9 @@ int mana_hwc_create_channel(struct gdma_context *gc)
gd->pdid = INVALID_PDID;
gd->doorbell = INVALID_DOORBELL;
/* mana_hwc_init_queues() only creates the required data structures,
* and doesn't touch the HWC device.
*/
err = mana_hwc_init_queues(hwc, HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH,
HW_CHANNEL_MAX_REQUEST_SIZE,
HW_CHANNEL_MAX_RESPONSE_SIZE);
......@@ -741,42 +724,50 @@ int mana_hwc_create_channel(struct gdma_context *gc)
return 0;
out:
kfree(hwc);
mana_hwc_destroy_channel(gc);
return err;
}
void mana_hwc_destroy_channel(struct gdma_context *gc)
{
struct hw_channel_context *hwc = gc->hwc.driver_data;
struct hwc_caller_ctx *ctx;
mana_smc_teardown_hwc(&gc->shm_channel, false);
if (!hwc)
return;
/* gc->max_num_cqs is set in mana_hwc_init_event_handler(). If it's
* non-zero, the HWC worked and we should tear down the HWC here.
*/
if (gc->max_num_cqs > 0) {
mana_smc_teardown_hwc(&gc->shm_channel, false);
gc->max_num_cqs = 0;
}
ctx = hwc->caller_ctx;
kfree(ctx);
kfree(hwc->caller_ctx);
hwc->caller_ctx = NULL;
mana_hwc_destroy_wq(hwc, hwc->txq);
hwc->txq = NULL;
if (hwc->txq)
mana_hwc_destroy_wq(hwc, hwc->txq);
mana_hwc_destroy_wq(hwc, hwc->rxq);
hwc->rxq = NULL;
if (hwc->rxq)
mana_hwc_destroy_wq(hwc, hwc->rxq);
mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc->cq);
hwc->cq = NULL;
if (hwc->cq)
mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc->cq);
mana_gd_free_res_map(&hwc->inflight_msg_res);
hwc->num_inflight_msg = 0;
if (hwc->gdma_dev->pdid != INVALID_PDID) {
hwc->gdma_dev->doorbell = INVALID_DOORBELL;
hwc->gdma_dev->pdid = INVALID_PDID;
}
hwc->gdma_dev->doorbell = INVALID_DOORBELL;
hwc->gdma_dev->pdid = INVALID_PDID;
kfree(hwc);
gc->hwc.driver_data = NULL;
gc->hwc.gdma_context = NULL;
vfree(gc->cq_table);
gc->cq_table = NULL;
}
int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
......
......@@ -374,8 +374,8 @@ int mana_alloc_queues(struct net_device *ndev);
int mana_attach(struct net_device *ndev);
int mana_detach(struct net_device *ndev, bool from_close);
int mana_probe(struct gdma_dev *gd);
void mana_remove(struct gdma_dev *gd);
int mana_probe(struct gdma_dev *gd, bool resuming);
void mana_remove(struct gdma_dev *gd, bool suspending);
extern const struct ethtool_ops mana_ethtool_ops;
......
......@@ -1599,7 +1599,8 @@ static int mana_init_port(struct net_device *ndev)
err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq,
&num_indirect_entries);
if (err) {
netdev_err(ndev, "Failed to query info for vPort 0\n");
netdev_err(ndev, "Failed to query info for vPort %d\n",
port_idx);
goto reset_apc;
}
......@@ -1827,11 +1828,12 @@ static int mana_probe_port(struct mana_context *ac, int port_idx,
return err;
}
int mana_probe(struct gdma_dev *gd)
int mana_probe(struct gdma_dev *gd, bool resuming)
{
struct gdma_context *gc = gd->gdma_context;
struct mana_context *ac = gd->driver_data;
struct device *dev = gc->dev;
struct mana_context *ac;
u16 num_ports = 0;
int err;
int i;
......@@ -1843,44 +1845,70 @@ int mana_probe(struct gdma_dev *gd)
if (err)
return err;
ac = kzalloc(sizeof(*ac), GFP_KERNEL);
if (!ac)
return -ENOMEM;
if (!resuming) {
ac = kzalloc(sizeof(*ac), GFP_KERNEL);
if (!ac)
return -ENOMEM;
ac->gdma_dev = gd;
ac->num_ports = 1;
gd->driver_data = ac;
ac->gdma_dev = gd;
gd->driver_data = ac;
}
err = mana_create_eq(ac);
if (err)
goto out;
err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
MANA_MICRO_VERSION, &ac->num_ports);
MANA_MICRO_VERSION, &num_ports);
if (err)
goto out;
if (!resuming) {
ac->num_ports = num_ports;
} else {
if (ac->num_ports != num_ports) {
dev_err(dev, "The number of vPorts changed: %d->%d\n",
ac->num_ports, num_ports);
err = -EPROTO;
goto out;
}
}
if (ac->num_ports == 0)
dev_err(dev, "Failed to detect any vPort\n");
if (ac->num_ports > MAX_PORTS_IN_MANA_DEV)
ac->num_ports = MAX_PORTS_IN_MANA_DEV;
for (i = 0; i < ac->num_ports; i++) {
err = mana_probe_port(ac, i, &ac->ports[i]);
if (err)
break;
if (!resuming) {
for (i = 0; i < ac->num_ports; i++) {
err = mana_probe_port(ac, i, &ac->ports[i]);
if (err)
break;
}
} else {
for (i = 0; i < ac->num_ports; i++) {
rtnl_lock();
err = mana_attach(ac->ports[i]);
rtnl_unlock();
if (err)
break;
}
}
out:
if (err)
mana_remove(gd);
mana_remove(gd, false);
return err;
}
void mana_remove(struct gdma_dev *gd)
void mana_remove(struct gdma_dev *gd, bool suspending)
{
struct gdma_context *gc = gd->gdma_context;
struct mana_context *ac = gd->driver_data;
struct device *dev = gc->dev;
struct net_device *ndev;
int err;
int i;
for (i = 0; i < ac->num_ports; i++) {
......@@ -1896,7 +1924,16 @@ void mana_remove(struct gdma_dev *gd)
*/
rtnl_lock();
mana_detach(ndev, false);
err = mana_detach(ndev, false);
if (err)
netdev_err(ndev, "Failed to detach vPort %d: %d\n",
i, err);
if (suspending) {
/* No need to unregister the ndev. */
rtnl_unlock();
continue;
}
unregister_netdevice(ndev);
......@@ -1909,6 +1946,10 @@ void mana_remove(struct gdma_dev *gd)
out:
mana_gd_deregister_device(gd);
if (suspending)
return;
gd->driver_data = NULL;
gd->gdma_context = NULL;
kfree(ac);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment