Commit beaf3ebf authored by Jacek Lawrynowicz, committed by Stanislaw Gruszka

accel/ivpu: Move MMU register definitions to ivpu_mmu.c

MMU registers are not platform specific, so they should be defined
separately from the platform regs.
Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
Reviewed-by: Stanislaw Gruszka <stanislaw.gruszka@linux.intel.com>
Reviewed-by: Jeffrey Hugo <quic_jhugo@quicinc.com>
Signed-off-by: Stanislaw Gruszka <stanislaw.gruszka@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230901094957.168898-12-stanislaw.gruszka@linux.intel.com
parent c92ab361
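For orientation, a minimal sketch (not part of the patch) of what a call site looks like after the move: the register offsets keep their values, only the prefix changes from VPU_37XX_HOST_MMU_ to IVPU_MMU_REG_, and accesses still go through the driver's existing REGV_RD32()/REGV_WR32() helpers. The wrapper function below is hypothetical and only mirrors the pattern used in ivpu_mmu_config_check() in the diff.

/*
 * Hypothetical illustration, not part of the patch: reading an MMU
 * identification register after the rename. IVPU_MMU_REG_IDR0 is now a
 * local define in ivpu_mmu.c; REGV_RD32() and ivpu_dbg() are the driver's
 * existing helpers, as used in ivpu_mmu_config_check() below.
 */
static void ivpu_mmu_read_idr0_example(struct ivpu_device *vdev)
{
	u32 val = REGV_RD32(IVPU_MMU_REG_IDR0);	/* was VPU_37XX_HOST_MMU_IDR0 */

	ivpu_dbg(vdev, MMU, "IDR0 0x%x\n", val);
}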
@@ -191,39 +191,6 @@
#define VPU_37XX_HOST_SS_WORKPOINT_CONFIG_MIRROR_FINAL_PLL_FREQ_MASK GENMASK(15, 0)
#define VPU_37XX_HOST_SS_WORKPOINT_CONFIG_MIRROR_CONFIG_ID_MASK GENMASK(31, 16)
#define VPU_37XX_HOST_MMU_IDR0 0x00200000u
#define VPU_37XX_HOST_MMU_IDR1 0x00200004u
#define VPU_37XX_HOST_MMU_IDR3 0x0020000cu
#define VPU_37XX_HOST_MMU_IDR5 0x00200014u
#define VPU_37XX_HOST_MMU_CR0 0x00200020u
#define VPU_37XX_HOST_MMU_CR0ACK 0x00200024u
#define VPU_37XX_HOST_MMU_CR1 0x00200028u
#define VPU_37XX_HOST_MMU_CR2 0x0020002cu
#define VPU_37XX_HOST_MMU_IRQ_CTRL 0x00200050u
#define VPU_37XX_HOST_MMU_IRQ_CTRLACK 0x00200054u
#define VPU_37XX_HOST_MMU_GERROR 0x00200060u
#define VPU_37XX_HOST_MMU_GERROR_CMDQ_MASK BIT_MASK(0)
#define VPU_37XX_HOST_MMU_GERROR_EVTQ_ABT_MASK BIT_MASK(2)
#define VPU_37XX_HOST_MMU_GERROR_PRIQ_ABT_MASK BIT_MASK(3)
#define VPU_37XX_HOST_MMU_GERROR_MSI_CMDQ_ABT_MASK BIT_MASK(4)
#define VPU_37XX_HOST_MMU_GERROR_MSI_EVTQ_ABT_MASK BIT_MASK(5)
#define VPU_37XX_HOST_MMU_GERROR_MSI_PRIQ_ABT_MASK BIT_MASK(6)
#define VPU_37XX_HOST_MMU_GERROR_MSI_ABT_MASK BIT_MASK(7)
#define VPU_37XX_HOST_MMU_GERRORN 0x00200064u
#define VPU_37XX_HOST_MMU_STRTAB_BASE 0x00200080u
#define VPU_37XX_HOST_MMU_STRTAB_BASE_CFG 0x00200088u
#define VPU_37XX_HOST_MMU_CMDQ_BASE 0x00200090u
#define VPU_37XX_HOST_MMU_CMDQ_PROD 0x00200098u
#define VPU_37XX_HOST_MMU_CMDQ_CONS 0x0020009cu
#define VPU_37XX_HOST_MMU_EVTQ_BASE 0x002000a0u
#define VPU_37XX_HOST_MMU_EVTQ_PROD 0x002000a8u
#define VPU_37XX_HOST_MMU_EVTQ_CONS 0x002000acu
#define VPU_37XX_HOST_MMU_EVTQ_PROD_SEC (0x002000a8u + SZ_64K)
#define VPU_37XX_HOST_MMU_EVTQ_CONS_SEC (0x002000acu + SZ_64K)
#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES 0x00360000u
#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_CACHE_OVERRIDE_EN_MASK BIT_MASK(0)
#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_AWCACHE_OVERRIDE_MASK BIT_MASK(1)
......
@@ -7,12 +7,45 @@
#include <linux/highmem.h>
#include "ivpu_drv.h"
#include "ivpu_hw_37xx_reg.h"
#include "ivpu_hw_reg_io.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"
#include "ivpu_pm.h"
#define IVPU_MMU_REG_IDR0 0x00200000u
#define IVPU_MMU_REG_IDR1 0x00200004u
#define IVPU_MMU_REG_IDR3 0x0020000cu
#define IVPU_MMU_REG_IDR5 0x00200014u
#define IVPU_MMU_REG_CR0 0x00200020u
#define IVPU_MMU_REG_CR0ACK 0x00200024u
#define IVPU_MMU_REG_CR1 0x00200028u
#define IVPU_MMU_REG_CR2 0x0020002cu
#define IVPU_MMU_REG_IRQ_CTRL 0x00200050u
#define IVPU_MMU_REG_IRQ_CTRLACK 0x00200054u
#define IVPU_MMU_REG_GERROR 0x00200060u
#define IVPU_MMU_REG_GERROR_CMDQ_MASK BIT_MASK(0)
#define IVPU_MMU_REG_GERROR_EVTQ_ABT_MASK BIT_MASK(2)
#define IVPU_MMU_REG_GERROR_PRIQ_ABT_MASK BIT_MASK(3)
#define IVPU_MMU_REG_GERROR_MSI_CMDQ_ABT_MASK BIT_MASK(4)
#define IVPU_MMU_REG_GERROR_MSI_EVTQ_ABT_MASK BIT_MASK(5)
#define IVPU_MMU_REG_GERROR_MSI_PRIQ_ABT_MASK BIT_MASK(6)
#define IVPU_MMU_REG_GERROR_MSI_ABT_MASK BIT_MASK(7)
#define IVPU_MMU_REG_GERRORN 0x00200064u
#define IVPU_MMU_REG_STRTAB_BASE 0x00200080u
#define IVPU_MMU_REG_STRTAB_BASE_CFG 0x00200088u
#define IVPU_MMU_REG_CMDQ_BASE 0x00200090u
#define IVPU_MMU_REG_CMDQ_PROD 0x00200098u
#define IVPU_MMU_REG_CMDQ_CONS 0x0020009cu
#define IVPU_MMU_REG_EVTQ_BASE 0x002000a0u
#define IVPU_MMU_REG_EVTQ_PROD 0x002000a8u
#define IVPU_MMU_REG_EVTQ_CONS 0x002000acu
#define IVPU_MMU_REG_EVTQ_PROD_SEC (0x002000a8u + SZ_64K)
#define IVPU_MMU_REG_EVTQ_CONS_SEC (0x002000acu + SZ_64K)
#define IVPU_MMU_REG_CMDQ_CONS_ERR_MASK GENMASK(30, 24)
#define IVPU_MMU_IDR0_REF 0x080f3e0f
#define IVPU_MMU_IDR0_REF_SIMICS 0x080f3e1f
#define IVPU_MMU_IDR1_REF 0x0e739d18
@@ -186,13 +219,13 @@
#define IVPU_MMU_REG_TIMEOUT_US (10 * USEC_PER_MSEC)
#define IVPU_MMU_QUEUE_TIMEOUT_US (100 * USEC_PER_MSEC)
#define IVPU_MMU_GERROR_ERR_MASK ((REG_FLD(VPU_37XX_HOST_MMU_GERROR, CMDQ)) | \
(REG_FLD(VPU_37XX_HOST_MMU_GERROR, EVTQ_ABT)) | \
(REG_FLD(VPU_37XX_HOST_MMU_GERROR, PRIQ_ABT)) | \
(REG_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_CMDQ_ABT)) | \
(REG_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_EVTQ_ABT)) | \
(REG_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_PRIQ_ABT)) | \
(REG_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_ABT)))
#define IVPU_MMU_GERROR_ERR_MASK ((REG_FLD(IVPU_MMU_REG_GERROR, CMDQ)) | \
(REG_FLD(IVPU_MMU_REG_GERROR, EVTQ_ABT)) | \
(REG_FLD(IVPU_MMU_REG_GERROR, PRIQ_ABT)) | \
(REG_FLD(IVPU_MMU_REG_GERROR, MSI_CMDQ_ABT)) | \
(REG_FLD(IVPU_MMU_REG_GERROR, MSI_EVTQ_ABT)) | \
(REG_FLD(IVPU_MMU_REG_GERROR, MSI_PRIQ_ABT)) | \
(REG_FLD(IVPU_MMU_REG_GERROR, MSI_ABT)))
static char *ivpu_mmu_event_to_str(u32 cmd)
{
@@ -250,15 +283,15 @@ static void ivpu_mmu_config_check(struct ivpu_device *vdev)
else
val_ref = IVPU_MMU_IDR0_REF;
val = REGV_RD32(VPU_37XX_HOST_MMU_IDR0);
val = REGV_RD32(IVPU_MMU_REG_IDR0);
if (val != val_ref)
ivpu_dbg(vdev, MMU, "IDR0 0x%x != IDR0_REF 0x%x\n", val, val_ref);
val = REGV_RD32(VPU_37XX_HOST_MMU_IDR1);
val = REGV_RD32(IVPU_MMU_REG_IDR1);
if (val != IVPU_MMU_IDR1_REF)
ivpu_dbg(vdev, MMU, "IDR1 0x%x != IDR1_REF 0x%x\n", val, IVPU_MMU_IDR1_REF);
val = REGV_RD32(VPU_37XX_HOST_MMU_IDR3);
val = REGV_RD32(IVPU_MMU_REG_IDR3);
if (val != IVPU_MMU_IDR3_REF)
ivpu_dbg(vdev, MMU, "IDR3 0x%x != IDR3_REF 0x%x\n", val, IVPU_MMU_IDR3_REF);
@@ -269,7 +302,7 @@ static void ivpu_mmu_config_check(struct ivpu_device *vdev)
else
val_ref = IVPU_MMU_IDR5_REF;
val = REGV_RD32(VPU_37XX_HOST_MMU_IDR5);
val = REGV_RD32(IVPU_MMU_REG_IDR5);
if (val != val_ref)
ivpu_dbg(vdev, MMU, "IDR5 0x%x != IDR5_REF 0x%x\n", val, val_ref);
}
@@ -396,18 +429,18 @@ static int ivpu_mmu_irqs_setup(struct ivpu_device *vdev)
u32 irq_ctrl = IVPU_MMU_IRQ_EVTQ_EN | IVPU_MMU_IRQ_GERROR_EN;
int ret;
ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_IRQ_CTRL, 0);
ret = ivpu_mmu_reg_write(vdev, IVPU_MMU_REG_IRQ_CTRL, 0);
if (ret)
return ret;
return ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_IRQ_CTRL, irq_ctrl);
return ivpu_mmu_reg_write(vdev, IVPU_MMU_REG_IRQ_CTRL, irq_ctrl);
}
static int ivpu_mmu_cmdq_wait_for_cons(struct ivpu_device *vdev)
{
struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq;
return REGV_POLL(VPU_37XX_HOST_MMU_CMDQ_CONS, cmdq->cons, (cmdq->prod == cmdq->cons),
return REGV_POLL(IVPU_MMU_REG_CMDQ_CONS, cmdq->cons, (cmdq->prod == cmdq->cons),
IVPU_MMU_QUEUE_TIMEOUT_US);
}
@@ -447,7 +480,7 @@ static int ivpu_mmu_cmdq_sync(struct ivpu_device *vdev)
return ret;
clflush_cache_range(q->base, IVPU_MMU_CMDQ_SIZE);
REGV_WR32(VPU_37XX_HOST_MMU_CMDQ_PROD, q->prod);
REGV_WR32(IVPU_MMU_REG_CMDQ_PROD, q->prod);
ret = ivpu_mmu_cmdq_wait_for_cons(vdev);
if (ret)
@@ -495,7 +528,7 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
mmu->evtq.prod = 0;
mmu->evtq.cons = 0;
ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, 0);
ret = ivpu_mmu_reg_write(vdev, IVPU_MMU_REG_CR0, 0);
if (ret)
return ret;
@@ -505,17 +538,17 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
FIELD_PREP(IVPU_MMU_CR1_QUEUE_SH, IVPU_MMU_SH_ISH) |
FIELD_PREP(IVPU_MMU_CR1_QUEUE_OC, IVPU_MMU_CACHE_WB) |
FIELD_PREP(IVPU_MMU_CR1_QUEUE_IC, IVPU_MMU_CACHE_WB);
REGV_WR32(VPU_37XX_HOST_MMU_CR1, val);
REGV_WR32(IVPU_MMU_REG_CR1, val);
REGV_WR64(VPU_37XX_HOST_MMU_STRTAB_BASE, mmu->strtab.dma_q);
REGV_WR32(VPU_37XX_HOST_MMU_STRTAB_BASE_CFG, mmu->strtab.base_cfg);
REGV_WR64(IVPU_MMU_REG_STRTAB_BASE, mmu->strtab.dma_q);
REGV_WR32(IVPU_MMU_REG_STRTAB_BASE_CFG, mmu->strtab.base_cfg);
REGV_WR64(VPU_37XX_HOST_MMU_CMDQ_BASE, mmu->cmdq.dma_q);
REGV_WR32(VPU_37XX_HOST_MMU_CMDQ_PROD, 0);
REGV_WR32(VPU_37XX_HOST_MMU_CMDQ_CONS, 0);
REGV_WR64(IVPU_MMU_REG_CMDQ_BASE, mmu->cmdq.dma_q);
REGV_WR32(IVPU_MMU_REG_CMDQ_PROD, 0);
REGV_WR32(IVPU_MMU_REG_CMDQ_CONS, 0);
val = IVPU_MMU_CR0_CMDQEN;
ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val);
ret = ivpu_mmu_reg_write(vdev, IVPU_MMU_REG_CR0, val);
if (ret)
return ret;
@@ -531,17 +564,17 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
if (ret)
return ret;
REGV_WR64(VPU_37XX_HOST_MMU_EVTQ_BASE, mmu->evtq.dma_q);
REGV_WR32(VPU_37XX_HOST_MMU_EVTQ_PROD_SEC, 0);
REGV_WR32(VPU_37XX_HOST_MMU_EVTQ_CONS_SEC, 0);
REGV_WR64(IVPU_MMU_REG_EVTQ_BASE, mmu->evtq.dma_q);
REGV_WR32(IVPU_MMU_REG_EVTQ_PROD_SEC, 0);
REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, 0);
val |= IVPU_MMU_CR0_EVTQEN;
ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val);
ret = ivpu_mmu_reg_write(vdev, IVPU_MMU_REG_CR0, val);
if (ret)
return ret;
val |= IVPU_MMU_CR0_ATSCHK;
ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val);
ret = ivpu_mmu_reg_write(vdev, IVPU_MMU_REG_CR0, val);
if (ret)
return ret;
@@ -550,7 +583,7 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
return ret;
val |= IVPU_MMU_CR0_SMMUEN;
return ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val);
return ivpu_mmu_reg_write(vdev, IVPU_MMU_REG_CR0, val);
}
static void ivpu_mmu_strtab_link_cd(struct ivpu_device *vdev, u32 sid)
@@ -801,14 +834,14 @@ static u32 *ivpu_mmu_get_event(struct ivpu_device *vdev)
u32 idx = IVPU_MMU_Q_IDX(evtq->cons);
u32 *evt = evtq->base + (idx * IVPU_MMU_EVTQ_CMD_SIZE);
evtq->prod = REGV_RD32(VPU_37XX_HOST_MMU_EVTQ_PROD_SEC);
evtq->prod = REGV_RD32(IVPU_MMU_REG_EVTQ_PROD_SEC);
if (!CIRC_CNT(IVPU_MMU_Q_IDX(evtq->prod), IVPU_MMU_Q_IDX(evtq->cons), IVPU_MMU_Q_COUNT))
return NULL;
clflush_cache_range(evt, IVPU_MMU_EVTQ_CMD_SIZE);
evtq->cons = (evtq->cons + 1) & IVPU_MMU_Q_WRAP_MASK;
REGV_WR32(VPU_37XX_HOST_MMU_EVTQ_CONS_SEC, evtq->cons);
REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, evtq->cons);
return evt;
}
@@ -841,35 +874,35 @@ void ivpu_mmu_irq_gerr_handler(struct ivpu_device *vdev)
ivpu_dbg(vdev, IRQ, "MMU error\n");
gerror_val = REGV_RD32(VPU_37XX_HOST_MMU_GERROR);
gerrorn_val = REGV_RD32(VPU_37XX_HOST_MMU_GERRORN);
gerror_val = REGV_RD32(IVPU_MMU_REG_GERROR);
gerrorn_val = REGV_RD32(IVPU_MMU_REG_GERRORN);
active = gerror_val ^ gerrorn_val;
if (!(active & IVPU_MMU_GERROR_ERR_MASK))
return;
if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_ABT, active))
if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, MSI_ABT, active))
ivpu_warn_ratelimited(vdev, "MMU MSI ABT write aborted\n");
if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_PRIQ_ABT, active))
if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, MSI_PRIQ_ABT, active))
ivpu_warn_ratelimited(vdev, "MMU PRIQ MSI ABT write aborted\n");
if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_EVTQ_ABT, active))
if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, MSI_EVTQ_ABT, active))
ivpu_warn_ratelimited(vdev, "MMU EVTQ MSI ABT write aborted\n");
if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_CMDQ_ABT, active))
if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, MSI_CMDQ_ABT, active))
ivpu_warn_ratelimited(vdev, "MMU CMDQ MSI ABT write aborted\n");
if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, PRIQ_ABT, active))
if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, PRIQ_ABT, active))
ivpu_err_ratelimited(vdev, "MMU PRIQ write aborted\n");
if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, EVTQ_ABT, active))
if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, EVTQ_ABT, active))
ivpu_err_ratelimited(vdev, "MMU EVTQ write aborted\n");
if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, CMDQ, active))
if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, CMDQ, active))
ivpu_err_ratelimited(vdev, "MMU CMDQ write aborted\n");
REGV_WR32(VPU_37XX_HOST_MMU_GERRORN, gerror_val);
REGV_WR32(IVPU_MMU_REG_GERRORN, gerror_val);
}
int ivpu_mmu_set_pgtable(struct ivpu_device *vdev, int ssid, struct ivpu_mmu_pgtable *pgtable)
......