Commit f9f83456 authored by Alexey Kardashevskiy's avatar Alexey Kardashevskiy Committed by Michael Ellerman

powerpc/powernv/npu: Simplify DMA setup

NPU devices are emulated in firmware and mainly used for NPU NVLink
training; there is one NPU device per hardware link. Their DMA/TCE setup
must match the GPU which is connected via PCIe and NVLink so any changes
to the DMA/TCE setup on the GPU PCIe device need to be propagated to
the NVLink device as this is what device drivers expect and it doesn't
make much sense to do anything else.

This makes NPU DMA setup explicit.
pnv_npu_ioda_controller_ops::pnv_npu_dma_set_mask is moved to pci-ioda
and made static; it now prints a warning, because dma_set_mask() should
never be called directly on an NPU device — it would not configure the
GPU in any case — so we make this explicit.

Instead of using PNV_IODA_PE_PEER and peers[] (which the next patch will
remove), we test every PCI device if there are corresponding NVLink
devices. If there are any, we propagate bypass mode to just found NPU
devices by calling the setup helper directly (which takes @bypass) and
avoid guessing (i.e. calculating from DMA mask) whether we need bypass
or not on NPU devices. Since DMA setup happens in very rare occasion,
this will not slow down booting or VFIO start/stop much.

This renames pnv_npu_disable_bypass to pnv_npu_dma_set_32 to make it
more clear what the function really does which is programming 32bit
table address to the TVT ("disabling bypass" means writing zeroes to
the TVT).

This removes pnv_npu_dma_set_bypass() from pnv_npu_ioda_fixup() as
the DMA configuration on NPU does not matter until dma_set_mask() is
called on GPU and that will do the NPU DMA configuration.

This removes phb->dma_dev_setup initialization for NPU as
pnv_pci_ioda_dma_dev_setup is no-op for it anyway.

This stops using npe->tce_bypass_base as it never changes and values
other than zero are not supported.
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Reviewed-by: Alistair Popple <alistair@popple.id.au>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 6969af73
...@@ -196,10 +196,9 @@ void pnv_npu_init_dma_pe(struct pnv_ioda_pe *npe) ...@@ -196,10 +196,9 @@ void pnv_npu_init_dma_pe(struct pnv_ioda_pe *npe)
} }
/* /*
* For the NPU we want to point the TCE table at the same table as the * Enables 32 bit DMA on NPU.
* real PCI device.
*/ */
static void pnv_npu_disable_bypass(struct pnv_ioda_pe *npe) static void pnv_npu_dma_set_32(struct pnv_ioda_pe *npe)
{ {
struct pnv_phb *phb = npe->phb; struct pnv_phb *phb = npe->phb;
struct pci_dev *gpdev; struct pci_dev *gpdev;
...@@ -235,72 +234,62 @@ static void pnv_npu_disable_bypass(struct pnv_ioda_pe *npe) ...@@ -235,72 +234,62 @@ static void pnv_npu_disable_bypass(struct pnv_ioda_pe *npe)
} }
/* /*
* Enable/disable bypass mode on the NPU. The NPU only supports one * Enables bypass mode on the NPU. The NPU only supports one
* window per link, so bypass needs to be explicitly enabled or * window per link, so bypass needs to be explicitly enabled or
* disabled. Unlike for a PHB3 bypass and non-bypass modes can't be * disabled. Unlike for a PHB3 bypass and non-bypass modes can't be
* active at the same time. * active at the same time.
*/ */
int pnv_npu_dma_set_bypass(struct pnv_ioda_pe *npe, bool enable) static int pnv_npu_dma_set_bypass(struct pnv_ioda_pe *npe)
{ {
struct pnv_phb *phb = npe->phb; struct pnv_phb *phb = npe->phb;
int64_t rc = 0; int64_t rc = 0;
phys_addr_t top = memblock_end_of_DRAM();
if (phb->type != PNV_PHB_NPU || !npe->pdev) if (phb->type != PNV_PHB_NPU || !npe->pdev)
return -EINVAL; return -EINVAL;
if (enable) { /* Enable the bypass window */
/* Enable the bypass window */
phys_addr_t top = memblock_end_of_DRAM(); top = roundup_pow_of_two(top);
dev_info(&npe->pdev->dev, "Enabling bypass for PE %d\n",
npe->tce_bypass_base = 0; npe->pe_number);
top = roundup_pow_of_two(top); rc = opal_pci_map_pe_dma_window_real(phb->opal_id,
dev_info(&npe->pdev->dev, "Enabling bypass for PE %d\n", npe->pe_number, npe->pe_number,
npe->pe_number); 0 /* bypass base */, top);
rc = opal_pci_map_pe_dma_window_real(phb->opal_id,
npe->pe_number, npe->pe_number,
npe->tce_bypass_base, top);
} else {
/*
* Disable the bypass window by replacing it with the
* TCE32 window.
*/
pnv_npu_disable_bypass(npe);
}
return rc; return rc;
} }
int pnv_npu_dma_set_mask(struct pci_dev *npdev, u64 dma_mask) void pnv_npu_try_dma_set_bypass(struct pci_dev *gpdev, bool bypass)
{ {
struct pci_controller *hose = pci_bus_to_host(npdev->bus); int i;
struct pnv_phb *phb = hose->private_data; struct pnv_phb *phb;
struct pci_dn *pdn = pci_get_pdn(npdev); struct pci_dn *pdn;
struct pnv_ioda_pe *npe, *gpe; struct pnv_ioda_pe *npe;
struct pci_dev *gpdev; struct pci_dev *npdev;
uint64_t top;
bool bypass = false;
if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE)) for (i = 0; ; ++i) {
return -ENXIO; npdev = pnv_pci_get_npu_dev(gpdev, i);
/* We only do bypass if it's enabled on the linked device */ if (!npdev)
npe = &phb->ioda.pe_array[pdn->pe_number]; break;
gpe = get_gpu_pci_dev_and_pe(npe, &gpdev);
if (!gpe)
return -ENODEV;
if (gpe->tce_bypass_enabled) { pdn = pci_get_pdn(npdev);
top = gpe->tce_bypass_base + memblock_end_of_DRAM() - 1; if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
bypass = (dma_mask >= top); return;
}
if (bypass) phb = pci_bus_to_host(npdev->bus)->private_data;
dev_info(&npdev->dev, "Using 64-bit DMA iommu bypass\n");
else
dev_info(&npdev->dev, "Using 32-bit DMA via iommu\n");
pnv_npu_dma_set_bypass(npe, bypass); /* We only do bypass if it's enabled on the linked device */
*npdev->dev.dma_mask = dma_mask; npe = &phb->ioda.pe_array[pdn->pe_number];
return 0; if (bypass) {
dev_info(&npdev->dev,
"Using 64-bit DMA iommu bypass\n");
pnv_npu_dma_set_bypass(npe);
} else {
dev_info(&npdev->dev, "Using 32-bit DMA via iommu\n");
pnv_npu_dma_set_32(npe);
}
}
} }
...@@ -1669,8 +1669,6 @@ static int pnv_pci_ioda_dma_set_mask(struct pci_dev *pdev, u64 dma_mask) ...@@ -1669,8 +1669,6 @@ static int pnv_pci_ioda_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
struct pnv_ioda_pe *pe; struct pnv_ioda_pe *pe;
uint64_t top; uint64_t top;
bool bypass = false; bool bypass = false;
struct pci_dev *linked_npu_dev;
int i;
if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE)) if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
return -ENODEV;; return -ENODEV;;
...@@ -1691,15 +1689,7 @@ static int pnv_pci_ioda_dma_set_mask(struct pci_dev *pdev, u64 dma_mask) ...@@ -1691,15 +1689,7 @@ static int pnv_pci_ioda_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
*pdev->dev.dma_mask = dma_mask; *pdev->dev.dma_mask = dma_mask;
/* Update peer npu devices */ /* Update peer npu devices */
if (pe->flags & PNV_IODA_PE_PEER) pnv_npu_try_dma_set_bypass(pdev, bypass);
for (i = 0; i < PNV_IODA_MAX_PEER_PES; i++) {
if (!pe->peers[i])
continue;
linked_npu_dev = pe->peers[i]->pdev;
if (dma_get_mask(&linked_npu_dev->dev) != dma_mask)
dma_set_mask(&linked_npu_dev->dev, dma_mask);
}
return 0; return 0;
} }
...@@ -3194,7 +3184,6 @@ static void pnv_npu_ioda_fixup(void) ...@@ -3194,7 +3184,6 @@ static void pnv_npu_ioda_fixup(void)
enable_bypass = dma_get_mask(&pe->pdev->dev) == enable_bypass = dma_get_mask(&pe->pdev->dev) ==
DMA_BIT_MASK(64); DMA_BIT_MASK(64);
pnv_npu_init_dma_pe(pe); pnv_npu_init_dma_pe(pe);
pnv_npu_dma_set_bypass(pe, enable_bypass);
} }
} }
} }
...@@ -3340,6 +3329,14 @@ static const struct pci_controller_ops pnv_pci_ioda_controller_ops = { ...@@ -3340,6 +3329,14 @@ static const struct pci_controller_ops pnv_pci_ioda_controller_ops = {
.shutdown = pnv_pci_ioda_shutdown, .shutdown = pnv_pci_ioda_shutdown,
}; };
static int pnv_npu_dma_set_mask(struct pci_dev *npdev, u64 dma_mask)
{
dev_err_once(&npdev->dev,
"%s operation unsupported for NVLink devices\n",
__func__);
return -EPERM;
}
static const struct pci_controller_ops pnv_npu_ioda_controller_ops = { static const struct pci_controller_ops pnv_npu_ioda_controller_ops = {
.dma_dev_setup = pnv_pci_dma_dev_setup, .dma_dev_setup = pnv_pci_dma_dev_setup,
#ifdef CONFIG_PCI_MSI #ifdef CONFIG_PCI_MSI
...@@ -3516,9 +3513,6 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np, ...@@ -3516,9 +3513,6 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
phb->freeze_pe = pnv_ioda_freeze_pe; phb->freeze_pe = pnv_ioda_freeze_pe;
phb->unfreeze_pe = pnv_ioda_unfreeze_pe; phb->unfreeze_pe = pnv_ioda_unfreeze_pe;
/* Setup TCEs */
phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup;
/* Setup MSI support */ /* Setup MSI support */
pnv_pci_init_ioda_msis(phb); pnv_pci_init_ioda_msis(phb);
...@@ -3531,10 +3525,12 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np, ...@@ -3531,10 +3525,12 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
*/ */
ppc_md.pcibios_fixup = pnv_pci_ioda_fixup; ppc_md.pcibios_fixup = pnv_pci_ioda_fixup;
if (phb->type == PNV_PHB_NPU) if (phb->type == PNV_PHB_NPU) {
hose->controller_ops = pnv_npu_ioda_controller_ops; hose->controller_ops = pnv_npu_ioda_controller_ops;
else } else {
phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup;
hose->controller_ops = pnv_pci_ioda_controller_ops; hose->controller_ops = pnv_pci_ioda_controller_ops;
}
#ifdef CONFIG_PCI_IOV #ifdef CONFIG_PCI_IOV
ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov_resources; ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov_resources;
......
...@@ -220,8 +220,7 @@ extern void pnv_teardown_msi_irqs(struct pci_dev *pdev); ...@@ -220,8 +220,7 @@ extern void pnv_teardown_msi_irqs(struct pci_dev *pdev);
/* Nvlink functions */ /* Nvlink functions */
extern void pnv_npu_init_dma_pe(struct pnv_ioda_pe *npe); extern void pnv_npu_init_dma_pe(struct pnv_ioda_pe *npe);
extern void pnv_npu_setup_dma_pe(struct pnv_ioda_pe *npe); extern void pnv_npu_setup_dma_pe(struct pnv_ioda_pe *npe);
extern int pnv_npu_dma_set_bypass(struct pnv_ioda_pe *npe, bool enabled); extern void pnv_npu_try_dma_set_bypass(struct pci_dev *gpdev, bool bypass);
extern int pnv_npu_dma_set_mask(struct pci_dev *npdev, u64 dma_mask);
extern void pnv_pci_ioda2_tce_invalidate_entire(struct pnv_phb *phb, bool rm); extern void pnv_pci_ioda2_tce_invalidate_entire(struct pnv_phb *phb, bool rm);
#endif /* __POWERNV_PCI_H */ #endif /* __POWERNV_PCI_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment