Commit 887c3820 authored by Salil Mehta, committed by David S. Miller

net: hns3: Updates MSI/MSI-X alloc/free APIs (deprecated) to new APIs

This patch migrates the HNS3 driver code from the deprecated PCI
MSI/MSI-X interrupt vector allocation/free APIs to the new common APIs.
Signed-off-by: Salil Mehta <salil.mehta@huawei.com>
Suggested-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 97438abc
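
For context: the new common interface is the pci_alloc_irq_vectors() / pci_irq_vector() / pci_free_irq_vectors() family from <linux/pci.h>, which hides the MSI-X vs. MSI vs. legacy-INTx distinction behind a single allocation call. The sketch below illustrates that general pattern only; my_dev, my_irq_handler and my_setup_irqs are hypothetical names and are not part of the HNS3 driver or of this patch.

#include <linux/interrupt.h>
#include <linux/pci.h>

/* Illustrative pci_alloc_irq_vectors() usage; not HNS3 code. */
struct my_dev {
	struct pci_dev *pdev;
	int num_vecs;
};

static irqreturn_t my_irq_handler(int irq, void *data)
{
	/* A real handler would inspect the device referenced by 'data'. */
	return IRQ_HANDLED;
}

static int my_setup_irqs(struct my_dev *mdev, int want)
{
	struct pci_dev *pdev = mdev->pdev;
	int nvecs, i, ret;

	/* Ask for 1..'want' vectors; MSI-X is tried first, then MSI. */
	nvecs = pci_alloc_irq_vectors(pdev, 1, want,
				      PCI_IRQ_MSIX | PCI_IRQ_MSI);
	if (nvecs < 0)
		return nvecs;
	mdev->num_vecs = nvecs;

	for (i = 0; i < nvecs; i++) {
		/* pci_irq_vector() maps a vector index to a Linux IRQ number. */
		ret = request_irq(pci_irq_vector(pdev, i), my_irq_handler,
				  0, "my_dev", mdev);
		if (ret)
			goto err_free;
	}
	return 0;

err_free:
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), mdev);
	pci_free_irq_vectors(pdev);
	return ret;
}

The diff below applies the same idea in hclge_init_msi(): a single pci_alloc_irq_vectors() call replaces the pci_enable_msix_range()/msix_entry bookkeeping, the resulting IRQ numbers are tracked in hdev->vector_irq[], and hclge_pci_uninit() tears everything down with pci_free_irq_vectors().
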
@@ -891,14 +891,14 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
 	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
 
 	if (hnae3_dev_roce_supported(hdev)) {
-		hdev->num_roce_msix =
+		hdev->num_roce_msi =
 		hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
 			       HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
 
 		/* PF should have NIC vectors and Roce vectors,
 		 * NIC vectors are queued before Roce vectors.
 		 */
-		hdev->num_msi = hdev->num_roce_msix + HCLGE_ROCE_VECTOR_OFFSET;
+		hdev->num_msi = hdev->num_roce_msi + HCLGE_ROCE_VECTOR_OFFSET;
 	} else {
 		hdev->num_msi =
 		hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
@@ -1950,7 +1950,7 @@ static int hclge_init_roce_base_info(struct hclge_vport *vport)
 	struct hnae3_handle *roce = &vport->roce;
 	struct hnae3_handle *nic = &vport->nic;
 
-	roce->rinfo.num_vectors = vport->back->num_roce_msix;
+	roce->rinfo.num_vectors = vport->back->num_roce_msi;
 
 	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
 	    vport->back->num_msi_left == 0)
@@ -1968,67 +1968,47 @@ static int hclge_init_roce_base_info(struct hclge_vport *vport)
 	return 0;
 }
 
-static int hclge_init_msix(struct hclge_dev *hdev)
-{
-	struct pci_dev *pdev = hdev->pdev;
-	int ret, i;
-
-	hdev->msix_entries = devm_kcalloc(&pdev->dev, hdev->num_msi,
-					  sizeof(struct msix_entry),
-					  GFP_KERNEL);
-	if (!hdev->msix_entries)
-		return -ENOMEM;
-
-	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
-					   sizeof(u16), GFP_KERNEL);
-	if (!hdev->vector_status)
-		return -ENOMEM;
-
-	for (i = 0; i < hdev->num_msi; i++) {
-		hdev->msix_entries[i].entry = i;
-		hdev->vector_status[i] = HCLGE_INVALID_VPORT;
-	}
-
-	hdev->num_msi_left = hdev->num_msi;
-	hdev->base_msi_vector = hdev->pdev->irq;
-	hdev->roce_base_vector = hdev->base_msi_vector +
-				HCLGE_ROCE_VECTOR_OFFSET;
-
-	ret = pci_enable_msix_range(hdev->pdev, hdev->msix_entries,
-				    hdev->num_msi, hdev->num_msi);
-	if (ret < 0) {
-		dev_info(&hdev->pdev->dev,
-			 "MSI-X vector alloc failed: %d\n", ret);
-		return ret;
-	}
-
-	return 0;
-}
-
 static int hclge_init_msi(struct hclge_dev *hdev)
 {
 	struct pci_dev *pdev = hdev->pdev;
 	int vectors;
 	int i;
 
-	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
-					   sizeof(u16), GFP_KERNEL);
-	if (!hdev->vector_status)
-		return -ENOMEM;
-
-	for (i = 0; i < hdev->num_msi; i++)
-		hdev->vector_status[i] = HCLGE_INVALID_VPORT;
-
-	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, PCI_IRQ_MSI);
+	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
+					PCI_IRQ_MSI | PCI_IRQ_MSIX);
 	if (vectors < 0) {
-		dev_err(&pdev->dev, "MSI vectors enable failed %d\n", vectors);
-		return -EINVAL;
+		dev_err(&pdev->dev,
+			"failed(%d) to allocate MSI/MSI-X vectors\n",
+			vectors);
+		return vectors;
 	}
+	if (vectors < hdev->num_msi)
+		dev_warn(&hdev->pdev->dev,
+			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
+			 hdev->num_msi, vectors);
+
 	hdev->num_msi = vectors;
 	hdev->num_msi_left = vectors;
 	hdev->base_msi_vector = pdev->irq;
 	hdev->roce_base_vector = hdev->base_msi_vector +
 				HCLGE_ROCE_VECTOR_OFFSET;
 
+	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
+					   sizeof(u16), GFP_KERNEL);
+	if (!hdev->vector_status) {
+		pci_free_irq_vectors(pdev);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < hdev->num_msi; i++)
+		hdev->vector_status[i] = HCLGE_INVALID_VPORT;
+
+	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
+					sizeof(int), GFP_KERNEL);
+	if (!hdev->vector_irq) {
+		pci_free_irq_vectors(pdev);
+		return -ENOMEM;
+	}
+
 	return 0;
 }
@@ -2704,6 +2684,7 @@ static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
 					vport->vport_id *
 					HCLGE_VECTOR_VF_OFFSET;
 				hdev->vector_status[i] = vport->vport_id;
+				hdev->vector_irq[i] = vector->vector;
 
 				vector++;
 				alloc++;
@@ -2722,15 +2703,10 @@ static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
 {
 	int i;
 
-	for (i = 0; i < hdev->num_msi; i++) {
-		if (hdev->msix_entries) {
-			if (vector == hdev->msix_entries[i].vector)
-				return i;
-		} else {
-			if (vector == (hdev->base_msi_vector + i))
-				return i;
-		}
-	}
+	for (i = 0; i < hdev->num_msi; i++)
+		if (vector == hdev->vector_irq[i])
+			return i;
 
 	return -EINVAL;
 }
@@ -4664,14 +4640,7 @@ static void hclge_pci_uninit(struct hclge_dev *hdev)
 {
 	struct pci_dev *pdev = hdev->pdev;
 
-	if (hdev->flag & HCLGE_FLAG_USE_MSIX) {
-		pci_disable_msix(pdev);
-		devm_kfree(&pdev->dev, hdev->msix_entries);
-		hdev->msix_entries = NULL;
-	} else {
-		pci_disable_msi(pdev);
-	}
-
+	pci_free_irq_vectors(pdev);
 	pci_clear_master(pdev);
 	pci_release_mem_regions(pdev);
 	pci_disable_device(pdev);
@@ -4689,7 +4658,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 		goto err_hclge_dev;
 	}
 
-	hdev->flag |= HCLGE_FLAG_USE_MSIX;
 	hdev->pdev = pdev;
 	hdev->ae_dev = ae_dev;
 	hdev->reset_type = HNAE3_NONE_RESET;
@@ -4726,12 +4694,9 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 		return ret;
 	}
 
-	if (hdev->flag & HCLGE_FLAG_USE_MSIX)
-		ret = hclge_init_msix(hdev);
-	else
-		ret = hclge_init_msi(hdev);
+	ret = hclge_init_msi(hdev);
 
 	if (ret) {
-		dev_err(&pdev->dev, "Init msix/msi error, ret = %d.\n", ret);
+		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
 		return ret;
 	}
@@ -425,9 +425,6 @@ struct hclge_dev {
 	u16 num_tqps;		/* Num task queue pairs of this PF */
 	u16 num_req_vfs;	/* Num VFs requested for this PF */
 
-	u16 num_roce_msix;	/* Num of roce vectors for this PF */
-	int roce_base_vector;
-
 	/* Base task tqp physical id of this PF */
 	u16 base_tqp_pid;
 	u16 alloc_rss_size;	/* Allocated RSS task queue */
@@ -457,8 +454,10 @@ struct hclge_dev {
 	u16 num_msi_left;
 	u16 num_msi_used;
 	u32 base_msi_vector;
-	struct msix_entry *msix_entries;
 	u16 *vector_status;
+	int *vector_irq;
+	u16 num_roce_msi;	/* Num of roce vectors for this PF */
+	int roce_base_vector;
 
 	u16 pending_udp_bitmap;
@@ -482,12 +481,10 @@ struct hclge_dev {
 	struct hnae3_client *nic_client;
 	struct hnae3_client *roce_client;
 
-#define HCLGE_FLAG_USE_MSI		0x00000001
-#define HCLGE_FLAG_USE_MSIX		0x00000002
-#define HCLGE_FLAG_MAIN			0x00000004
-#define HCLGE_FLAG_DCB_CAPABLE		0x00000008
-#define HCLGE_FLAG_DCB_ENABLE		0x00000010
-#define HCLGE_FLAG_MQPRIO_ENABLE	0x00000020
+#define HCLGE_FLAG_MAIN			BIT(0)
+#define HCLGE_FLAG_DCB_CAPABLE		BIT(1)
+#define HCLGE_FLAG_DCB_ENABLE		BIT(2)
+#define HCLGE_FLAG_MQPRIO_ENABLE	BIT(3)
 	u32 flag;
 
 	u32 pkt_buf_size; /* Total pf buf size for tx/rx */