Commit 45102864 authored by Lucas De Marchi, committed by Rodrigo Vivi

drm/xe/pat: Prefer the arch/IP names

Both DG2 and PVC are derived from XeHP, but DG2 should not really
re-use something introduced by PVC, so it's odd to have DG2 re-using the
PVC programming for PAT. Let's prefer using the architecture and/or IP
names.
Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
Link: https://lore.kernel.org/r/20230927193902.2849159-8-lucas.demarchi@intel.com
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 194bdb85
...@@ -14,57 +14,57 @@ ...@@ -14,57 +14,57 @@
0x4800, 0x4804, \ 0x4800, 0x4804, \
0x4848, 0x484c) 0x4848, 0x484c)
#define MTL_L4_POLICY_MASK REG_GENMASK(3, 2) #define XELPG_L4_POLICY_MASK REG_GENMASK(3, 2)
#define MTL_PAT_3_UC REG_FIELD_PREP(MTL_L4_POLICY_MASK, 3) #define XELPG_PAT_3_UC REG_FIELD_PREP(XELPG_L4_POLICY_MASK, 3)
#define MTL_PAT_1_WT REG_FIELD_PREP(MTL_L4_POLICY_MASK, 1) #define XELPG_PAT_1_WT REG_FIELD_PREP(XELPG_L4_POLICY_MASK, 1)
#define MTL_PAT_0_WB REG_FIELD_PREP(MTL_L4_POLICY_MASK, 0) #define XELPG_PAT_0_WB REG_FIELD_PREP(XELPG_L4_POLICY_MASK, 0)
#define MTL_INDEX_COH_MODE_MASK REG_GENMASK(1, 0) #define XELPG_INDEX_COH_MODE_MASK REG_GENMASK(1, 0)
#define MTL_3_COH_2W REG_FIELD_PREP(MTL_INDEX_COH_MODE_MASK, 3) #define XELPG_3_COH_2W REG_FIELD_PREP(XELPG_INDEX_COH_MODE_MASK, 3)
#define MTL_2_COH_1W REG_FIELD_PREP(MTL_INDEX_COH_MODE_MASK, 2) #define XELPG_2_COH_1W REG_FIELD_PREP(XELPG_INDEX_COH_MODE_MASK, 2)
#define MTL_0_COH_NON REG_FIELD_PREP(MTL_INDEX_COH_MODE_MASK, 0) #define XELPG_0_COH_NON REG_FIELD_PREP(XELPG_INDEX_COH_MODE_MASK, 0)
#define PVC_CLOS_LEVEL_MASK REG_GENMASK(3, 2) #define XEHPC_CLOS_LEVEL_MASK REG_GENMASK(3, 2)
#define PVC_PAT_CLOS(x) REG_FIELD_PREP(PVC_CLOS_LEVEL_MASK, x) #define XEHPC_PAT_CLOS(x) REG_FIELD_PREP(XEHPC_CLOS_LEVEL_MASK, x)
#define TGL_MEM_TYPE_MASK REG_GENMASK(1, 0) #define XELP_MEM_TYPE_MASK REG_GENMASK(1, 0)
#define TGL_PAT_WB REG_FIELD_PREP(TGL_MEM_TYPE_MASK, 3) #define XELP_PAT_WB REG_FIELD_PREP(XELP_MEM_TYPE_MASK, 3)
#define TGL_PAT_WT REG_FIELD_PREP(TGL_MEM_TYPE_MASK, 2) #define XELP_PAT_WT REG_FIELD_PREP(XELP_MEM_TYPE_MASK, 2)
#define TGL_PAT_WC REG_FIELD_PREP(TGL_MEM_TYPE_MASK, 1) #define XELP_PAT_WC REG_FIELD_PREP(XELP_MEM_TYPE_MASK, 1)
#define TGL_PAT_UC REG_FIELD_PREP(TGL_MEM_TYPE_MASK, 0) #define XELP_PAT_UC REG_FIELD_PREP(XELP_MEM_TYPE_MASK, 0)
struct xe_pat_ops { struct xe_pat_ops {
void (*program_graphics)(struct xe_gt *gt, const u32 table[], int n_entries); void (*program_graphics)(struct xe_gt *gt, const u32 table[], int n_entries);
void (*program_media)(struct xe_gt *gt, const u32 table[], int n_entries); void (*program_media)(struct xe_gt *gt, const u32 table[], int n_entries);
}; };
static const u32 tgl_pat_table[] = { static const u32 xelp_pat_table[] = {
[0] = TGL_PAT_WB, [0] = XELP_PAT_WB,
[1] = TGL_PAT_WC, [1] = XELP_PAT_WC,
[2] = TGL_PAT_WT, [2] = XELP_PAT_WT,
[3] = TGL_PAT_UC, [3] = XELP_PAT_UC,
[4] = TGL_PAT_WB, [4] = XELP_PAT_WB,
[5] = TGL_PAT_WB, [5] = XELP_PAT_WB,
[6] = TGL_PAT_WB, [6] = XELP_PAT_WB,
[7] = TGL_PAT_WB, [7] = XELP_PAT_WB,
}; };
static const u32 pvc_pat_table[] = { static const u32 xehpc_pat_table[] = {
[0] = TGL_PAT_UC, [0] = XELP_PAT_UC,
[1] = TGL_PAT_WC, [1] = XELP_PAT_WC,
[2] = TGL_PAT_WT, [2] = XELP_PAT_WT,
[3] = TGL_PAT_WB, [3] = XELP_PAT_WB,
[4] = PVC_PAT_CLOS(1) | TGL_PAT_WT, [4] = XEHPC_PAT_CLOS(1) | XELP_PAT_WT,
[5] = PVC_PAT_CLOS(1) | TGL_PAT_WB, [5] = XEHPC_PAT_CLOS(1) | XELP_PAT_WB,
[6] = PVC_PAT_CLOS(2) | TGL_PAT_WT, [6] = XEHPC_PAT_CLOS(2) | XELP_PAT_WT,
[7] = PVC_PAT_CLOS(2) | TGL_PAT_WB, [7] = XEHPC_PAT_CLOS(2) | XELP_PAT_WB,
}; };
static const u32 mtl_pat_table[] = { static const u32 xelpg_pat_table[] = {
[0] = MTL_PAT_0_WB, [0] = XELPG_PAT_0_WB,
[1] = MTL_PAT_1_WT, [1] = XELPG_PAT_1_WT,
[2] = MTL_PAT_3_UC, [2] = XELPG_PAT_3_UC,
[3] = MTL_PAT_0_WB | MTL_2_COH_1W, [3] = XELPG_PAT_0_WB | XELPG_2_COH_1W,
[4] = MTL_PAT_0_WB | MTL_3_COH_2W, [4] = XELPG_PAT_0_WB | XELPG_3_COH_2W,
}; };
static void program_pat(struct xe_gt *gt, const u32 table[], int n_entries) static void program_pat(struct xe_gt *gt, const u32 table[], int n_entries)
...@@ -85,11 +85,11 @@ static void program_pat_mcr(struct xe_gt *gt, const u32 table[], int n_entries) ...@@ -85,11 +85,11 @@ static void program_pat_mcr(struct xe_gt *gt, const u32 table[], int n_entries)
} }
} }
static const struct xe_pat_ops tgl_pat_ops = { static const struct xe_pat_ops xelp_pat_ops = {
.program_graphics = program_pat, .program_graphics = program_pat,
}; };
static const struct xe_pat_ops dg2_pat_ops = { static const struct xe_pat_ops xehp_pat_ops = {
.program_graphics = program_pat_mcr, .program_graphics = program_pat_mcr,
}; };
...@@ -97,7 +97,7 @@ static const struct xe_pat_ops dg2_pat_ops = { ...@@ -97,7 +97,7 @@ static const struct xe_pat_ops dg2_pat_ops = {
* SAMedia register offsets are adjusted by the write methods and they target * SAMedia register offsets are adjusted by the write methods and they target
* registers that are not MCR, while for normal GT they are MCR * registers that are not MCR, while for normal GT they are MCR
*/ */
static const struct xe_pat_ops mtl_pat_ops = { static const struct xe_pat_ops xelpg_pat_ops = {
.program_graphics = program_pat, .program_graphics = program_pat,
.program_media = program_pat_mcr, .program_media = program_pat_mcr,
}; };
...@@ -105,25 +105,25 @@ static const struct xe_pat_ops mtl_pat_ops = { ...@@ -105,25 +105,25 @@ static const struct xe_pat_ops mtl_pat_ops = {
void xe_pat_init_early(struct xe_device *xe) void xe_pat_init_early(struct xe_device *xe)
{ {
if (xe->info.platform == XE_METEORLAKE) { if (xe->info.platform == XE_METEORLAKE) {
xe->pat.ops = &mtl_pat_ops; xe->pat.ops = &xelpg_pat_ops;
xe->pat.table = mtl_pat_table; xe->pat.table = xelpg_pat_table;
xe->pat.n_entries = ARRAY_SIZE(mtl_pat_table); xe->pat.n_entries = ARRAY_SIZE(xelpg_pat_table);
} else if (xe->info.platform == XE_PVC) { } else if (xe->info.platform == XE_PVC) {
xe->pat.ops = &dg2_pat_ops; xe->pat.ops = &xehp_pat_ops;
xe->pat.table = pvc_pat_table; xe->pat.table = xehpc_pat_table;
xe->pat.n_entries = ARRAY_SIZE(pvc_pat_table); xe->pat.n_entries = ARRAY_SIZE(xehpc_pat_table);
} else if (xe->info.platform == XE_DG2) { } else if (xe->info.platform == XE_DG2) {
/* /*
* Table is the same as previous platforms, but programming * Table is the same as previous platforms, but programming
* method has changed. * method has changed.
*/ */
xe->pat.ops = &dg2_pat_ops; xe->pat.ops = &xehp_pat_ops;
xe->pat.table = tgl_pat_table; xe->pat.table = xelp_pat_table;
xe->pat.n_entries = ARRAY_SIZE(tgl_pat_table); xe->pat.n_entries = ARRAY_SIZE(xelp_pat_table);
} else if (GRAPHICS_VERx100(xe) <= 1210) { } else if (GRAPHICS_VERx100(xe) <= 1210) {
xe->pat.ops = &tgl_pat_ops; xe->pat.ops = &xelp_pat_ops;
xe->pat.table = tgl_pat_table; xe->pat.table = xelp_pat_table;
xe->pat.n_entries = ARRAY_SIZE(tgl_pat_table); xe->pat.n_entries = ARRAY_SIZE(xelp_pat_table);
} else { } else {
/* /*
* Going forward we expect to need new PAT settings for most * Going forward we expect to need new PAT settings for most
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment