Commit 815b33fd authored by Joerg Roedel

x86/amd-iommu: Cleanup completion-wait handling

This patch cleans up the implementation of completion-wait
command sending. It also switches the completion indicator
from the MMIO bit to a memory store which can be checked
without IOMMU locking.
As a side effect, this patch makes the __iommu_queue_command
function obsolete, so it is removed as well.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
parent 11b6402c
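
Before the diff itself, a self-contained sketch of the pattern the patch moves to may help: the waiter spins on an ordinary memory location that the "device" eventually writes, bounded by a loop timeout, instead of polling an MMIO status bit under the IOMMU lock. This is a userspace analogue only, not part of the patch; fake_iommu, the pthread setup, and the usleep-based delays are illustrative stand-ins for the IOMMU writing to the physical address passed in the COMPLETION_WAIT command.

    /*
     * Illustrative userspace analogue of the new completion-wait polling
     * scheme. Build with: gcc -pthread -o complwait complwait.c
     */
    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    #define LOOP_TIMEOUT 100000              /* same bound the patch uses */

    static volatile uint64_t sem;            /* completion indicator in memory */

    /* Stand-in for the IOMMU: "completes" the COMPLETION_WAIT after a while. */
    static void *fake_iommu(void *arg)
    {
            usleep(500);                     /* simulate command processing */
            sem = 1;                         /* plain memory store, no MMIO */
            return NULL;
    }

    int main(void)
    {
            pthread_t dev;
            int i = 0;

            pthread_create(&dev, NULL, fake_iommu, NULL);

            /* Poll the in-memory semaphore instead of an MMIO status bit. */
            while (sem == 0 && i < LOOP_TIMEOUT) {
                    usleep(1);               /* kernel version uses udelay(1) */
                    i += 1;
            }

            if (i == LOOP_TIMEOUT)
                    fprintf(stderr, "Completion-Wait loop timed out\n");
            else
                    printf("completed after %d polls\n", i);

            pthread_join(dev, NULL);
            return 0;
    }

The essence is the same in both settings: because the completion indicator lives in ordinary memory, checking it requires neither an MMIO read nor holding iommu->lock.
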
@@ -25,6 +25,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/iommu-helper.h>
 #include <linux/iommu.h>
+#include <linux/delay.h>
 #include <asm/proto.h>
 #include <asm/iommu.h>
 #include <asm/gart.h>
@@ -34,7 +35,7 @@
 
 #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
 
-#define EXIT_LOOP_COUNT 10000000
+#define LOOP_TIMEOUT	100000
 
 static DEFINE_RWLOCK(amd_iommu_devtable_lock);
 
@@ -383,10 +384,14 @@ irqreturn_t amd_iommu_int_handler(int irq, void *data)
  *
  ****************************************************************************/
 
-static void build_completion_wait(struct iommu_cmd *cmd)
+static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
 {
+	WARN_ON(address & 0x7ULL);
+
 	memset(cmd, 0, sizeof(*cmd));
-	cmd->data[0] = CMD_COMPL_WAIT_INT_MASK;
+	cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK;
+	cmd->data[1] = upper_32_bits(__pa(address));
+	cmd->data[2] = 1;
 	CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
 }
 
@@ -432,12 +437,14 @@ static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
  * Writes the command to the IOMMUs command buffer and informs the
  * hardware about the new command. Must be called with iommu->lock held.
  */
-static int __iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
+static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
 {
+	unsigned long flags;
 	u32 tail, head;
 	u8 *target;
 
 	WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED);
+	spin_lock_irqsave(&iommu->lock, flags);
 	tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
 	target = iommu->cmd_buf + tail;
 	memcpy_toio(target, cmd, sizeof(*cmd));
@@ -446,99 +453,41 @@ static int __iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
 	if (tail == head)
 		return -ENOMEM;
 	writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
-
 	iommu->need_sync = true;
-
-	return 0;
-}
-
-/*
- * General queuing function for commands. Takes iommu->lock and calls
- * __iommu_queue_command().
- */
-static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
-{
-	unsigned long flags;
-	int ret;
-
-	spin_lock_irqsave(&iommu->lock, flags);
-	ret = __iommu_queue_command(iommu, cmd);
-	if (!ret)
-		iommu->need_sync = true;
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
-	return ret;
-}
-
-/*
- * This function waits until an IOMMU has completed a completion
- * wait command
- */
-static void __iommu_wait_for_completion(struct amd_iommu *iommu)
-{
-	int ready = 0;
-	unsigned status = 0;
-	unsigned long i = 0;
-
-	INC_STATS_COUNTER(compl_wait);
-
-	while (!ready && (i < EXIT_LOOP_COUNT)) {
-		++i;
-		/* wait for the bit to become one */
-		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
-		ready = status & MMIO_STATUS_COM_WAIT_INT_MASK;
-	}
-
-	/* set bit back to zero */
-	status &= ~MMIO_STATUS_COM_WAIT_INT_MASK;
-	writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET);
-
-	if (unlikely(i == EXIT_LOOP_COUNT))
-		iommu->reset_in_progress = true;
+	return 0;
 }
 
 /*
  * This function queues a completion wait command into the command
  * buffer of an IOMMU
  */
-static int __iommu_completion_wait(struct amd_iommu *iommu)
-{
-	struct iommu_cmd cmd;
-
-	build_completion_wait(&cmd);
-
-	return __iommu_queue_command(iommu, &cmd);
-}
-
-/*
- * This function is called whenever we need to ensure that the IOMMU has
- * completed execution of all commands we sent. It sends a
- * COMPLETION_WAIT command and waits for it to finish. The IOMMU informs
- * us about that by writing a value to a physical address we pass with
- * the command.
- */
 static int iommu_completion_wait(struct amd_iommu *iommu)
 {
-	int ret = 0;
-	unsigned long flags;
-
-	spin_lock_irqsave(&iommu->lock, flags);
+	struct iommu_cmd cmd;
+	volatile u64 sem = 0;
+	int ret, i = 0;
 
 	if (!iommu->need_sync)
-		goto out;
-
-	ret = __iommu_completion_wait(iommu);
+		return 0;
 
-	iommu->need_sync = false;
+	build_completion_wait(&cmd, (u64)&sem);
 
+	ret = iommu_queue_command(iommu, &cmd);
 	if (ret)
-		goto out;
-
-	__iommu_wait_for_completion(iommu);
+		return ret;
 
-out:
-	spin_unlock_irqrestore(&iommu->lock, flags);
+	while (sem == 0 && i < LOOP_TIMEOUT) {
+		udelay(1);
+		i += 1;
+	}
 
-	if (iommu->reset_in_progress)
+	if (i == LOOP_TIMEOUT) {
+		pr_alert("AMD-Vi: Completion-Wait loop timed out\n");
+		iommu->reset_in_progress = true;
 		reset_iommu_command_buffer(iommu);
+	}
 
 	return 0;
 }