Commit 162f17b2, authored by Karol Wachowski, committed by Stanislaw Gruszka

accel/ivpu: Refactor memory ranges logic

Add a new DMA range and change the naming convention for the virtual
address memory ranges managed by the KMD.

The newly available ranges are named as follows (the annotated struct
after the commit trailers maps them onto the new fields):
 * global range - global context accessible by FW
 * aliased range - user context accessible by FW
 * dma range - user context accessible by DMA
 * shave range - user context accessible by SHAVEs
 * global shave range - global context accessible by SHAVE NN
Signed-off-by: Karol Wachowski <karol.wachowski@linux.intel.com>
Reviewed-by: Stanislaw Gruszka <stanislaw.gruszka@linux.intel.com>
Signed-off-by: Stanislaw Gruszka <stanislaw.gruszka@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230731161258.2987564-6-stanislaw.gruszka@linux.intel.com
parent aa5f04d2
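For orientation (not part of the patch): how the five named ranges map onto
the new fields of struct ivpu_hw_info, as inferred from the hunks below. The
aliased range is represented by the user field, and the global shave region
has no field of its own; the SHAVE NN buffer is simply placed at shave.start
in the global context (see the ivpu_fw_mem_init() hunk).

	struct {
		struct ivpu_addr_range global; /* global range: global ctx, FW-accessible */
		struct ivpu_addr_range user;   /* aliased range: user ctx, FW-accessible */
		struct ivpu_addr_range shave;  /* shave range: user ctx, SHAVE-accessible;
						* also hosts the global SHAVE NN buffer
						* (allocated in the global ctx) */
		struct ivpu_addr_range dma;    /* dma range: user ctx, DMA-accessible */
	} ranges;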
drivers/accel/ivpu/ivpu_drv.c:

@@ -122,7 +122,7 @@ static int ivpu_get_capabilities(struct ivpu_device *vdev, struct drm_ivpu_param
 		args->value = 0;
 		break;
 	case DRM_IVPU_CAP_DMA_MEMORY_RANGE:
-		args->value = 0;
+		args->value = 1;
 		break;
 	default:
 		return -EINVAL;
@@ -160,7 +160,7 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
 		args->value = ivpu_get_context_count(vdev);
 		break;
 	case DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
-		args->value = vdev->hw->ranges.user_low.start;
+		args->value = vdev->hw->ranges.user.start;
 		break;
 	case DRM_IVPU_PARAM_CONTEXT_PRIORITY:
 		args->value = file_priv->priority;
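With the first hunk above, DRM_IVPU_CAP_DMA_MEMORY_RANGE now reports 1. A
minimal userspace probe, sketched here rather than taken from the patch (it
assumes the existing DRM_IVPU_PARAM_CAPABILITIES query path that
ivpu_get_capabilities() serves):

#include <sys/ioctl.h>
#include <drm/ivpu_accel.h>

/* Returns non-zero when the kernel exposes the new DMA memory range. */
static int ivpu_has_dma_range(int fd)
{
	struct drm_ivpu_param args = {
		.param = DRM_IVPU_PARAM_CAPABILITIES,
		.index = DRM_IVPU_CAP_DMA_MEMORY_RANGE,
	};

	if (ioctl(fd, DRM_IOCTL_IVPU_GET_PARAM, &args))
		return 0; /* older kernel: treat as unsupported */

	return args.value == 1;
}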
drivers/accel/ivpu/ivpu_fw.c:

@@ -204,7 +204,7 @@ static int ivpu_fw_update_global_range(struct ivpu_device *vdev)
 		return -EINVAL;
 	}
 
-	ivpu_hw_init_range(&vdev->hw->ranges.global_low, start, size);
+	ivpu_hw_init_range(&vdev->hw->ranges.global, start, size);
 
 	return 0;
 }
@@ -245,7 +245,7 @@ static int ivpu_fw_mem_init(struct ivpu_device *vdev)
 	}
 
 	if (fw->shave_nn_size) {
-		fw->mem_shave_nn = ivpu_bo_alloc_internal(vdev, vdev->hw->ranges.global_high.start,
+		fw->mem_shave_nn = ivpu_bo_alloc_internal(vdev, vdev->hw->ranges.shave.start,
							  fw->shave_nn_size, DRM_IVPU_BO_UNCACHED);
		if (!fw->mem_shave_nn) {
			ivpu_err(vdev, "Failed to allocate shavenn buffer\n");
@@ -443,9 +443,9 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
 	 * Uncached region of VPU address space, covers IPC buffers, job queues
 	 * and log buffers, programmable to L2$ Uncached by VPU MTRR
 	 */
-	boot_params->shared_region_base = vdev->hw->ranges.global_low.start;
-	boot_params->shared_region_size = vdev->hw->ranges.global_low.end -
-					  vdev->hw->ranges.global_low.start;
+	boot_params->shared_region_base = vdev->hw->ranges.global.start;
+	boot_params->shared_region_size = vdev->hw->ranges.global.end -
+					  vdev->hw->ranges.global.start;
 
 	boot_params->ipc_header_area_start = ipc_mem_rx->vpu_addr;
 	boot_params->ipc_header_area_size = ipc_mem_rx->base.size / 2;
@@ -453,10 +453,8 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
 	boot_params->ipc_payload_area_start = ipc_mem_rx->vpu_addr + ipc_mem_rx->base.size / 2;
 	boot_params->ipc_payload_area_size = ipc_mem_rx->base.size / 2;
 
-	boot_params->global_aliased_pio_base =
-		vdev->hw->ranges.global_aliased_pio.start;
-	boot_params->global_aliased_pio_size =
-		ivpu_hw_range_size(&vdev->hw->ranges.global_aliased_pio);
+	boot_params->global_aliased_pio_base = vdev->hw->ranges.user.start;
+	boot_params->global_aliased_pio_size = ivpu_hw_range_size(&vdev->hw->ranges.user);
 
 	/* Allow configuration for L2C_PAGE_TABLE with boot param value */
 	boot_params->autoconfig = 1;
@@ -464,7 +462,7 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
 	/* Enable L2 cache for first 2GB of high memory */
 	boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].use = 1;
 	boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg =
-		ADDR_TO_L2_CACHE_CFG(vdev->hw->ranges.global_high.start);
+		ADDR_TO_L2_CACHE_CFG(vdev->hw->ranges.shave.start);
 
 	if (vdev->fw->mem_shave_nn)
 		boot_params->shave_nn_fw_base = vdev->fw->mem_shave_nn->vpu_addr;
drivers/accel/ivpu/ivpu_gem.c:

@@ -279,10 +279,12 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
 	int ret;
 
 	if (!range) {
-		if (bo->flags & DRM_IVPU_BO_HIGH_MEM)
-			range = &vdev->hw->ranges.user_high;
+		if (bo->flags & DRM_IVPU_BO_SHAVE_MEM)
+			range = &vdev->hw->ranges.shave;
+		else if (bo->flags & DRM_IVPU_BO_DMA_MEM)
+			range = &vdev->hw->ranges.dma;
 		else
-			range = &vdev->hw->ranges.user_low;
+			range = &vdev->hw->ranges.user;
 	}
 
 	mutex_lock(&ctx->lock);
@@ -570,7 +572,7 @@ ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 fla
 		fixed_range.end = vpu_addr + size;
 		range = &fixed_range;
 	} else {
-		range = &vdev->hw->ranges.global_low;
+		range = &vdev->hw->ranges.global;
 	}
 
 	bo = ivpu_bo_alloc(vdev, &vdev->gctx, size, flags, &internal_ops, range, 0);
drivers/accel/ivpu/ivpu_hw.h:

@@ -38,11 +38,10 @@ struct ivpu_addr_range {
 struct ivpu_hw_info {
 	const struct ivpu_hw_ops *ops;
 	struct {
-		struct ivpu_addr_range global_low;
-		struct ivpu_addr_range global_high;
-		struct ivpu_addr_range user_low;
-		struct ivpu_addr_range user_high;
-		struct ivpu_addr_range global_aliased_pio;
+		struct ivpu_addr_range global;
+		struct ivpu_addr_range user;
+		struct ivpu_addr_range shave;
+		struct ivpu_addr_range dma;
 	} ranges;
 	struct {
 		u8 min_ratio;
drivers/accel/ivpu/ivpu_hw_37xx.c:

@@ -620,11 +620,10 @@ static int ivpu_hw_37xx_info_init(struct ivpu_device *vdev)
 
 	ivpu_pll_init_frequency_ratios(vdev);
 
-	ivpu_hw_init_range(&hw->ranges.global_low, 0x80000000, SZ_512M);
-	ivpu_hw_init_range(&hw->ranges.global_high, 0x180000000, SZ_2M);
-	ivpu_hw_init_range(&hw->ranges.user_low, 0xc0000000, 255 * SZ_1M);
-	ivpu_hw_init_range(&hw->ranges.user_high, 0x180000000, SZ_2G);
-	hw->ranges.global_aliased_pio = hw->ranges.user_low;
+	ivpu_hw_init_range(&hw->ranges.global, 0x80000000, SZ_512M);
+	ivpu_hw_init_range(&hw->ranges.user,   0xc0000000, 255 * SZ_1M);
+	ivpu_hw_init_range(&hw->ranges.shave, 0x180000000, SZ_2G);
+	ivpu_hw_init_range(&hw->ranges.dma,   0x200000000, SZ_8G);
 
 	return 0;
 }
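For reference, the 37xx address map these calls produce (end addresses
computed as start + size; note that the shave and dma ranges are back to
back):

	global: 0x0_8000_0000 - 0x0_A000_0000  (512 MB, global ctx)
	user:   0x0_C000_0000 - 0x0_CFF0_0000  (255 MB, user ctx)
	shave:  0x1_8000_0000 - 0x2_0000_0000  (2 GB,   user ctx)
	dma:    0x2_0000_0000 - 0x4_0000_0000  (8 GB,   user ctx)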
drivers/accel/ivpu/ivpu_mmu_context.c:

@@ -431,11 +431,11 @@ ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u3
 		return ret;
 
 	if (!context_id) {
-		start = vdev->hw->ranges.global_low.start;
-		end = vdev->hw->ranges.global_high.end;
+		start = vdev->hw->ranges.global.start;
+		end = vdev->hw->ranges.shave.end;
 	} else {
-		start = vdev->hw->ranges.user_low.start;
-		end = vdev->hw->ranges.user_high.end;
+		start = vdev->hw->ranges.user.start;
+		end = vdev->hw->ranges.dma.end;
 	}
 
 	drm_mm_init(&ctx->mm, start, end - start);
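With the 37xx values above, the global context's drm_mm spans
0x8000_0000..0x2_0000_0000, and every user context's spans
0xC000_0000..0x4_0000_0000, i.e. 0x4_0000_0000 - 0xC000_0000 =
0x3_4000_0000 (13 GB) of VPU virtual address space per user context. The
gaps between the named ranges inside that span stay unused, since
ivpu_bo_alloc_vpu_addr() always allocates from one of the named ranges.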
include/uapi/drm/ivpu_accel.h:

@@ -133,8 +133,10 @@ struct drm_ivpu_param {
 	__u64 value;
 };
 
-#define DRM_IVPU_BO_HIGH_MEM   0x00000001
+#define DRM_IVPU_BO_SHAVE_MEM  0x00000001
+#define DRM_IVPU_BO_HIGH_MEM   DRM_IVPU_BO_SHAVE_MEM
 #define DRM_IVPU_BO_MAPPABLE   0x00000002
+#define DRM_IVPU_BO_DMA_MEM    0x00000004
 
 #define DRM_IVPU_BO_CACHED     0x00000000
 #define DRM_IVPU_BO_UNCACHED   0x00010000

@@ -144,6 +146,7 @@ struct drm_ivpu_param {
 #define DRM_IVPU_BO_FLAGS \
 	(DRM_IVPU_BO_HIGH_MEM | \
 	 DRM_IVPU_BO_MAPPABLE | \
+	 DRM_IVPU_BO_DMA_MEM | \
 	 DRM_IVPU_BO_CACHE_MASK)

 /**
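To exercise the new flag from userspace, a hypothetical sketch (the helper
name and error handling are illustrative; DRM_IOCTL_IVPU_BO_CREATE and
struct drm_ivpu_bo_create are the driver's existing BO-create uapi):

#include <sys/ioctl.h>
#include <drm/ivpu_accel.h>

/* Hypothetical helper: create a BO placed in the new DMA range (the
 * user context's 8 GB window at 0x2_0000_0000 on 37xx). */
static int ivpu_bo_new_dma(int fd, __u64 size, __u32 *handle)
{
	struct drm_ivpu_bo_create args = {
		.size = size,
		.flags = DRM_IVPU_BO_DMA_MEM | DRM_IVPU_BO_MAPPABLE,
	};
	int ret = ioctl(fd, DRM_IOCTL_IVPU_BO_CREATE, &args);

	if (!ret)
		*handle = args.handle;
	return ret;
}

Old binaries that still pass DRM_IVPU_BO_HIGH_MEM keep working, since the
uapi hunk above redefines that name as an alias for DRM_IVPU_BO_SHAVE_MEM
with the same value.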