Commit 9e68447f authored by Linus Torvalds

Merge tag 'iommu-fixes-3.5-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull IOMMU fixes from Joerg Roedel:
 "Two patches are in here which fix AMD IOMMU specific issues.  One
  patch fixes a long-standing warning on resume because the
  amd_iommu_resume function enabled interrupts.  The other patch fixes a
  deadlock in an error-path of the page-fault request handling code of
  the IOMMU driver.

* tag 'iommu-fixes-3.5-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
  iommu/amd: Fix deadlock in ppr-handling error path
  iommu/amd: Cache pdev pointer to root-bridge
parents eea5b551 eee53537
drivers/iommu/amd_iommu.c

@@ -547,26 +547,12 @@ static void iommu_poll_events(struct amd_iommu *iommu)
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
-static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u32 head)
+static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
 {
 	struct amd_iommu_fault fault;
-	volatile u64 *raw;
-	int i;
 
 	INC_STATS_COUNTER(pri_requests);
 
-	raw = (u64 *)(iommu->ppr_log + head);
-
-	/*
-	 * Hardware bug: Interrupt may arrive before the entry is written to
-	 * memory. If this happens we need to wait for the entry to arrive.
-	 */
-	for (i = 0; i < LOOP_TIMEOUT; ++i) {
-		if (PPR_REQ_TYPE(raw[0]) != 0)
-			break;
-		udelay(1);
-	}
-
 	if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
 		pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n");
 		return;
@@ -578,12 +564,6 @@ static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u32 head)
 	fault.tag = PPR_TAG(raw[0]);
 	fault.flags = PPR_FLAGS(raw[0]);
 
-	/*
-	 * To detect the hardware bug we need to clear the entry
-	 * to back to zero.
-	 */
-	raw[0] = raw[1] = 0;
-
 	atomic_notifier_call_chain(&ppr_notifier, 0, &fault);
 }
@@ -595,25 +575,62 @@ static void iommu_poll_ppr_log(struct amd_iommu *iommu)
 	if (iommu->ppr_log == NULL)
 		return;
 
+	/* enable ppr interrupts again */
+	writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET);
+
 	spin_lock_irqsave(&iommu->lock, flags);
 
 	head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
 	tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
 
 	while (head != tail) {
-
-		/* Handle PPR entry */
-		iommu_handle_ppr_entry(iommu, head);
+		volatile u64 *raw;
+		u64 entry[2];
+		int i;
+
+		raw = (u64 *)(iommu->ppr_log + head);
+
+		/*
+		 * Hardware bug: Interrupt may arrive before the entry is
+		 * written to memory. If this happens we need to wait for the
+		 * entry to arrive.
+		 */
+		for (i = 0; i < LOOP_TIMEOUT; ++i) {
+			if (PPR_REQ_TYPE(raw[0]) != 0)
+				break;
+			udelay(1);
+		}
+
+		/* Avoid memcpy function-call overhead */
+		entry[0] = raw[0];
+		entry[1] = raw[1];
 
-		/* Update and refresh ring-buffer state*/
+		/*
+		 * To detect the hardware bug we need to clear the entry
+		 * back to zero.
+		 */
+		raw[0] = raw[1] = 0UL;
+
+		/* Update head pointer of hardware ring-buffer */
 		head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
 		writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
+
+		/*
+		 * Release iommu->lock because ppr-handling might need to
+		 * re-aquire it
+		 */
+		spin_unlock_irqrestore(&iommu->lock, flags);
+
+		/* Handle PPR entry */
+		iommu_handle_ppr_entry(iommu, entry);
+
+		spin_lock_irqsave(&iommu->lock, flags);
+
+		/* Refresh ring-buffer information */
+		head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
 		tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
 	}
 
-	/* enable ppr interrupts again */
-	writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET);
-
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
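The reworked loop states the root cause of the deadlock in its new comment: iommu_handle_ppr_entry() calls out through the ppr_notifier chain, and a notified handler may itself need iommu->lock, so invoking it with the lock still held can deadlock (per the patch title, this showed up in the error path of PPR handling). The fix copies the log entry into a local buffer, advances the hardware head pointer, and only then drops the lock and calls the handler. The following is a minimal, self-contained userspace sketch of that "copy under lock, call out unlocked" pattern; the ring buffer, entry type, and handler names here are invented for illustration and are not the driver's API.

/* Illustrative sketch only: ring, entry_t and handle_entry are made-up
 * names, not AMD IOMMU driver structures.  Build with: gcc -pthread */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

typedef struct { unsigned long long w[2]; } entry_t;

static struct {
        pthread_mutex_t lock;
        entry_t log[16];
        unsigned int head, tail;
} ring = { .lock = PTHREAD_MUTEX_INITIALIZER };

/* The handler may need the same lock again; this is what deadlocked
 * before the fix, when it was invoked with the lock still held. */
static void handle_entry(const entry_t *e)
{
        pthread_mutex_lock(&ring.lock);
        printf("handled entry %llx/%llx\n", e->w[0], e->w[1]);
        pthread_mutex_unlock(&ring.lock);
}

static void poll_log(void)
{
        pthread_mutex_lock(&ring.lock);
        while (ring.head != ring.tail) {
                entry_t entry = ring.log[ring.head];            /* copy under the lock */

                memset(&ring.log[ring.head], 0, sizeof(entry)); /* clear the slot */
                ring.head = (ring.head + 1) % 16;               /* advance head */

                pthread_mutex_unlock(&ring.lock);               /* drop the lock ... */
                handle_entry(&entry);                           /* ... before calling out */
                pthread_mutex_lock(&ring.lock);                 /* re-take and re-check */
        }
        pthread_mutex_unlock(&ring.lock);
}

int main(void)
{
        ring.log[0].w[0] = 0x1;
        ring.log[0].w[1] = 0x2;
        ring.tail = 1;
        poll_log();
        return 0;
}

Re-reading head and tail after re-taking the lock, as the patched loop does, also picks up any entries that were logged while the handler ran.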
drivers/iommu/amd_iommu_init.c

@@ -1029,6 +1029,9 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
 	if (!iommu->dev)
 		return 1;
 
+	iommu->root_pdev = pci_get_bus_and_slot(iommu->dev->bus->number,
+						PCI_DEVFN(0, 0));
+
 	iommu->cap_ptr = h->cap_ptr;
 	iommu->pci_seg = h->pci_seg;
 	iommu->mmio_phys = h->mmio_phys;
@@ -1323,20 +1326,16 @@ static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
 {
 	int i, j;
 	u32 ioc_feature_control;
-	struct pci_dev *pdev = NULL;
+	struct pci_dev *pdev = iommu->root_pdev;
 
 	/* RD890 BIOSes may not have completely reconfigured the iommu */
-	if (!is_rd890_iommu(iommu->dev))
+	if (!is_rd890_iommu(iommu->dev) || !pdev)
 		return;
 
 	/*
 	 * First, we need to ensure that the iommu is enabled. This is
 	 * controlled by a register in the northbridge
 	 */
-	pdev = pci_get_bus_and_slot(iommu->dev->bus->number, PCI_DEVFN(0, 0));
-	if (!pdev)
-		return;
 
 	/* Select Northbridge indirect register 0x75 and enable writing */
 	pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
@@ -1346,8 +1345,6 @@ static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
 	if (!(ioc_feature_control & 0x1))
 		pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);
 
-	pci_dev_put(pdev);
-
 	/* Restore the iommu BAR */
 	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
 			       iommu->stored_addr_lo);
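The second patch removes the root-bridge lookup from the resume path entirely: calling pci_get_bus_and_slot() from iommu_apply_resume_quirks() is not safe in that context (per the pull message, it ended up enabling interrupts during resume, producing the long-standing warning). Instead the pointer is resolved once in init_iommu_one(), cached in iommu->root_pdev, and the resume quirk simply bails out if it is NULL. A generic sketch of that "resolve at init, reuse in the restricted path" shape, with entirely hypothetical names (my_iommu, slow_bridge_lookup), could look like this:

/* Illustrative sketch only: hypothetical names, not the driver's API. */
#include <stdio.h>
#include <stdlib.h>

struct bridge { int id; };

struct my_iommu {
        struct bridge *root_bridge;     /* looked up once, at init time */
};

/* Stand-in for a lookup (think pci_get_bus_and_slot()) that may take
 * locks or block, and therefore must not run from the resume path. */
static struct bridge *slow_bridge_lookup(void)
{
        struct bridge *b = malloc(sizeof(*b));

        if (b)
                b->id = 42;
        return b;
}

static void my_iommu_init(struct my_iommu *iommu)
{
        iommu->root_bridge = slow_bridge_lookup();      /* may be NULL */
}

/* Resume-style path: no lookup allowed here, only the cached pointer,
 * mirroring the "|| !pdev" bail-out added to iommu_apply_resume_quirks(). */
static void my_iommu_resume(struct my_iommu *iommu)
{
        if (!iommu->root_bridge)
                return;         /* lookup failed at init; skip the quirk */
        printf("applying quirk via bridge %d\n", iommu->root_bridge->id);
}

int main(void)
{
        struct my_iommu iommu;

        my_iommu_init(&iommu);
        my_iommu_resume(&iommu);
        free(iommu.root_bridge);
        return 0;
}

Note that the patch also drops the pci_dev_put() from the resume path: the reference obtained at init time is simply kept for the lifetime of the IOMMU instead of being taken and released on every resume.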
drivers/iommu/amd_iommu_types.h

@@ -481,6 +481,9 @@ struct amd_iommu {
 	/* Pointer to PCI device of this IOMMU */
 	struct pci_dev *dev;
 
+	/* Cache pdev to root device for resume quirks */
+	struct pci_dev *root_pdev;
+
 	/* physical address of MMIO space */
 	u64 mmio_phys;
 	/* virtual address of MMIO space */