Commit 2bc956ef authored by Jack Xiao's avatar Jack Xiao Committed by Alex Deucher

drm/amdgpu: add the per-context meta data v3

The per-context meta data is a per-context data structure associated
with a MES-managed hardware ring, which includes the MCBP CSA, the
ring buffer, etc.

v2: fix typo
v3: a. use structure instead of typedef
    b. move amdgpu_mes_ctx_get_offs_* to amdgpu_ring.h
    c. use __aligned to enforce alignment
Signed-off-by: Jack Xiao <Jack.Xiao@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 80af9daa
......@@ -91,6 +91,7 @@
#include "amdgpu_dm.h"
#include "amdgpu_virt.h"
#include "amdgpu_csa.h"
#include "amdgpu_mes_ctx.h"
#include "amdgpu_gart.h"
#include "amdgpu_debugfs.h"
#include "amdgpu_job.h"
......
/*
* Copyright 2019 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef __AMDGPU_MES_CTX_H__
#define __AMDGPU_MES_CTX_H__
#include "v10_structs.h"
/*
 * Indices of the per-ring write-back slots; each constant selects one
 * amdgpu_wb_slot entry in the slots[] arrays of
 * struct amdgpu_mes_ctx_meta_data below.  AMDGPU_MES_CTX_MAX_OFFS is
 * the slot count and also sizes those arrays.
 */
enum {
AMDGPU_MES_CTX_RPTR_OFFS = 0, /* ring read pointer write-back */
AMDGPU_MES_CTX_WPTR_OFFS, /* ring write pointer write-back */
AMDGPU_MES_CTX_FENCE_OFFS, /* fence value write-back */
AMDGPU_MES_CTX_COND_EXE_OFFS, /* conditional-execution flag */
AMDGPU_MES_CTX_TRAIL_FENCE_OFFS, /* trailing fence write-back */
AMDGPU_MES_CTX_MAX_OFFS, /* number of write-back slots */
};
/*
 * Additional per-ring region identifiers, numbered starting at
 * AMDGPU_MES_CTX_MAX_OFFS so they never collide with the write-back
 * slot indices above.
 */
enum {
AMDGPU_MES_CTX_RING_OFFS = AMDGPU_MES_CTX_MAX_OFFS, /* ring buffer region */
AMDGPU_MES_CTX_IB_OFFS, /* indirect buffer region (IB tests) */
AMDGPU_MES_CTX_PADDING_OFFS, /* trailing padding region */
};
/* Maximum number of MES-managed rings per context, by engine type. */
#define AMDGPU_MES_CTX_MAX_GFX_RINGS 1
#define AMDGPU_MES_CTX_MAX_COMPUTE_RINGS 4
#define AMDGPU_MES_CTX_MAX_SDMA_RINGS 2
/* Total rings per context across all engine types. */
#define AMDGPU_MES_CTX_MAX_RINGS \
(AMDGPU_MES_CTX_MAX_GFX_RINGS + \
AMDGPU_MES_CTX_MAX_COMPUTE_RINGS + \
AMDGPU_MES_CTX_MAX_SDMA_RINGS)
/* Byte size of the per-SDMA-ring CSA area (sdma_meta_data below). */
#define AMDGPU_CSA_SDMA_SIZE 64
/* Byte size of the per-compute-ring MEC HPD area (mec_hpd below);
 * presumably hardware queue descriptor storage — verify against gfx10. */
#define GFX10_MEC_HPD_SIZE 2048
/*
 * One write-back slot: 8 dwords (32 bytes).  Sized larger than a single
 * dword presumably for alignment/padding between slots — TODO confirm
 * how much of each slot the hardware actually writes.
 */
struct amdgpu_wb_slot {
uint32_t data[8];
};
/*
 * GPU-visible layout of the per-context meta data buffer: one
 * page-aligned chunk per gfx/compute/sdma ring, each holding the ring
 * buffer, engine-specific state, the write-back slots indexed by the
 * AMDGPU_MES_CTX_*_OFFS enum, and an IB scratch area for ring tests.
 * NOTE(review): this layout appears to be shared with hardware/firmware
 * — assume field order, sizes and alignment all matter; do not reorder.
 */
struct amdgpu_mes_ctx_meta_data {
struct {
/* 4-page ring buffer */
uint8_t ring[PAGE_SIZE * 4];
/* gfx csa */
struct v10_gfx_meta_data gfx_meta_data;
/* 64 KiB GDS backup area */
uint8_t gds_backup[64 * 1024];
/* write-back slots, indexed by AMDGPU_MES_CTX_*_OFFS */
struct amdgpu_wb_slot slots[AMDGPU_MES_CTX_MAX_OFFS];
/* only for ib test */
uint32_t ib[256] __aligned(256);
uint32_t padding[64];
} __aligned(PAGE_SIZE) gfx[AMDGPU_MES_CTX_MAX_GFX_RINGS];
struct {
/* 4-page ring buffer */
uint8_t ring[PAGE_SIZE * 4];
/* MEC HPD area for this compute ring */
uint8_t mec_hpd[GFX10_MEC_HPD_SIZE];
/* write-back slots, indexed by AMDGPU_MES_CTX_*_OFFS */
struct amdgpu_wb_slot slots[AMDGPU_MES_CTX_MAX_OFFS];
/* only for ib test */
uint32_t ib[256] __aligned(256);
uint32_t padding[64];
} __aligned(PAGE_SIZE) compute[AMDGPU_MES_CTX_MAX_COMPUTE_RINGS];
struct {
/* 4-page ring buffer */
uint8_t ring[PAGE_SIZE * 4];
/* sdma csa for mcbp */
uint8_t sdma_meta_data[AMDGPU_CSA_SDMA_SIZE];
/* write-back slots, indexed by AMDGPU_MES_CTX_*_OFFS */
struct amdgpu_wb_slot slots[AMDGPU_MES_CTX_MAX_OFFS];
/* only for ib test */
uint32_t ib[256] __aligned(256);
uint32_t padding[64];
} __aligned(PAGE_SIZE) sdma[AMDGPU_MES_CTX_MAX_SDMA_RINGS];
};
/*
 * Driver-side handle for one context's meta data buffer: the backing
 * BO, its GPU and CPU mappings, and per-IP gang ids.
 */
struct amdgpu_mes_ctx_data {
struct amdgpu_bo *meta_data_obj; /* BO backing the meta data */
uint64_t meta_data_gpu_addr; /* GPU VA of the mapped buffer */
struct amdgpu_bo_va *meta_data_va; /* VA mapping of the BO */
void *meta_data_ptr; /* CPU mapping of the buffer */
/* one gang id per HW IP type up to DMA — presumably assigned by MES;
 * verify against the MES scheduling code */
uint32_t gang_ids[AMDGPU_HW_IP_DMA+1];
};
/* Bit 24 set in a fence value marks it as belonging to a MES queue;
 * the low 24 bits (FLAG - 1) then carry the queue id. */
#define AMDGPU_FENCE_MES_QUEUE_FLAG 0x1000000u
#define AMDGPU_FENCE_MES_QUEUE_ID_MASK (AMDGPU_FENCE_MES_QUEUE_FLAG - 1)
#endif
......@@ -364,6 +364,15 @@ static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring,
ring->count_dw -= count_dw;
}
/*
 * Translate a byte offset inside the MES context meta data buffer into
 * the corresponding GPU VA.  Evaluates to 0 when the ring is not a
 * MES-managed queue or has no context attached.
 *
 * Macro arguments are fully parenthesized (CERT PRE01-C) so that
 * expression arguments such as `base + extra` expand correctly; note
 * both arguments may still be evaluated more than once.
 */
#define amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset) \
	((ring)->is_mes_queue && (ring)->mes_ctx ? \
	 ((ring)->mes_ctx->meta_data_gpu_addr + (offset)) : 0)

/*
 * Translate a byte offset inside the MES context meta data buffer into
 * a CPU pointer via the buffer's CPU mapping; NULL when the ring is not
 * a MES-managed queue or has no context attached.
 */
#define amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset) \
	((ring)->is_mes_queue && (ring)->mes_ctx ? \
	 (void *)((uint8_t *)((ring)->mes_ctx->meta_data_ptr) + (offset)) : \
	 NULL)
int amdgpu_ring_test_helper(struct amdgpu_ring *ring);
void amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment