Commit 62742d12 authored by Lucas De Marchi

drm/xe: Normalize bo flags macros

The flags stored in the BO grew over time without following much of a
naming pattern. First of all, get rid of the _BIT suffix, which was
banned everywhere else due to the guideline in
drivers/gpu/drm/i915/i915_reg.h that xe loosely follows:

	Define bits using ``REG_BIT(N)``. Do **not** add ``_BIT`` suffix to the name.

Here the flags aren't for a register, but it's good practice to keep
the naming consistent, as shown in the sketch below.
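
For illustration, this is the shape that guideline prescribes for
register bits; the register below is made up purely for this example:

	/* hypothetical register, for illustration only */
	#define FAKE_UNIT_CTL			_MMIO(0x1234)
	#define   FAKE_UNIT_CTL_ENABLE		REG_BIT(0)	/* good */
	#define   FAKE_UNIT_CTL_ENABLE_BIT	REG_BIT(0)	/* bad: _BIT suffix */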

The second divergence in names is whether or not "CREATE" is used. This
is because most of the flags are passed to the xe_bo_create*() family
of functions, changing their behavior. However, since the flags are
also stored in the bo itself and checked elsewhere in the code, it
seems better to just omit the CREATE part.
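
A minimal sketch of that dual use, based on call sites touched by this
patch (the helper in the last line is hypothetical):

	/* the flag changes the behavior of the create call... */
	bo = xe_bo_create_pin_map(xe, tile, NULL, size, ttm_bo_type_kernel,
				  XE_BO_FLAG_SYSTEM | XE_BO_FLAG_GGTT);

	/* ...and the same value stays in bo->flags, checked long after */
	if (bo->flags & XE_BO_FLAG_GGTT)
		map_into_ggtt(bo);	/* hypothetical helper */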

With those two guidelines, all the flags are given the form
XE_BO_FLAG_<FLAG_NAME> by the following commands:

	git grep -le "XE_BO_" -- drivers/gpu/drm/xe | xargs sed -i \
		-e "s/XE_BO_\([_A-Z0-9]*\)_BIT/XE_BO_\1/g" \
		-e 's/XE_BO_CREATE_/XE_BO_FLAG_/g'
	git grep -le "XE_BO_" -- drivers/gpu/drm/xe | xargs sed -i -r \
		-e 's/XE_BO_(DEFER_BACKING|SCANOUT|FIXED_PLACEMENT|PAGETABLE|NEEDS_CPU_ACCESS|NEEDS_UC|INTERNAL_TEST|INTERNAL_64K|GGTT_INVALIDATE)/XE_BO_FLAG_\1/g'
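
As an example, pushing a sample line through the same expressions shows
the intended effect (illustrative shell session):

	$ echo 'XE_BO_CREATE_GGTT_BIT | XE_BO_SCANOUT_BIT | XE_BO_PAGETABLE' | \
		sed -e 's/XE_BO_\([_A-Z0-9]*\)_BIT/XE_BO_\1/g' \
		    -e 's/XE_BO_CREATE_/XE_BO_FLAG_/g' | \
		sed -r 's/XE_BO_(DEFER_BACKING|SCANOUT|FIXED_PLACEMENT|PAGETABLE|NEEDS_CPU_ACCESS|NEEDS_UC|INTERNAL_TEST|INTERNAL_64K|GGTT_INVALIDATE)/XE_BO_FLAG_\1/g'
	XE_BO_FLAG_GGTT | XE_BO_FLAG_SCANOUT | XE_BO_FLAG_PAGETABLE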

And then the defines in drivers/gpu/drm/xe/xe_bo.h are adjusted to
follow the coding style.
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240322142702.186529-3-lucas.demarchi@intel.com
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
parent e27f8a45
@@ -17,7 +17,7 @@ static inline int i915_gem_stolen_insert_node_in_range(struct xe_device *xe,
 {
 	struct xe_bo *bo;
 	int err;
-	u32 flags = XE_BO_CREATE_PINNED_BIT | XE_BO_CREATE_STOLEN_BIT;
+	u32 flags = XE_BO_FLAG_PINNED | XE_BO_FLAG_STOLEN;
 	if (align)
 		size = ALIGN(size, align);
...
@@ -11,7 +11,7 @@
 void intel_fb_bo_framebuffer_fini(struct xe_bo *bo)
 {
-	if (bo->flags & XE_BO_CREATE_PINNED_BIT) {
+	if (bo->flags & XE_BO_FLAG_PINNED) {
 		/* Unpin our kernel fb first */
 		xe_bo_lock(bo, false);
 		xe_bo_unpin(bo);
@@ -33,9 +33,9 @@ int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
 	if (ret)
 		return ret;
-	if (!(bo->flags & XE_BO_SCANOUT_BIT)) {
+	if (!(bo->flags & XE_BO_FLAG_SCANOUT)) {
 		/*
-		 * XE_BO_SCANOUT_BIT should ideally be set at creation, or is
+		 * XE_BO_FLAG_SCANOUT should ideally be set at creation, or is
 		 * automatically set when creating FB. We cannot change caching
 		 * mode when the boect is VM_BINDed, so we can only set
 		 * coherency with display when unbound.
@@ -44,7 +44,7 @@ int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
 			ttm_bo_unreserve(&bo->ttm);
 			return -EINVAL;
 		}
-		bo->flags |= XE_BO_SCANOUT_BIT;
+		bo->flags |= XE_BO_FLAG_SCANOUT;
 	}
 	ttm_bo_unreserve(&bo->ttm);
...
@@ -42,9 +42,9 @@ struct drm_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
 	if (!IS_DGFX(dev_priv)) {
 		obj = xe_bo_create_pin_map(dev_priv, xe_device_get_root_tile(dev_priv),
 					   NULL, size,
-					   ttm_bo_type_kernel, XE_BO_SCANOUT_BIT |
-					   XE_BO_CREATE_STOLEN_BIT |
-					   XE_BO_CREATE_PINNED_BIT);
+					   ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
+					   XE_BO_FLAG_STOLEN |
+					   XE_BO_FLAG_PINNED);
 		if (!IS_ERR(obj))
 			drm_info(&dev_priv->drm, "Allocated fbdev into stolen\n");
 		else
@@ -52,9 +52,9 @@ struct drm_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
 	}
 	if (IS_ERR(obj)) {
 		obj = xe_bo_create_pin_map(dev_priv, xe_device_get_root_tile(dev_priv), NULL, size,
-					   ttm_bo_type_kernel, XE_BO_SCANOUT_BIT |
-					   XE_BO_CREATE_VRAM_IF_DGFX(xe_device_get_root_tile(dev_priv)) |
-					   XE_BO_CREATE_PINNED_BIT);
+					   ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
+					   XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(dev_priv)) |
+					   XE_BO_FLAG_PINNED);
 	}
 	if (IS_ERR(obj)) {
@@ -81,8 +81,8 @@ int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info
 {
 	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
-	if (!(obj->flags & XE_BO_CREATE_SYSTEM_BIT)) {
-		if (obj->flags & XE_BO_CREATE_STOLEN_BIT)
+	if (!(obj->flags & XE_BO_FLAG_SYSTEM)) {
+		if (obj->flags & XE_BO_FLAG_STOLEN)
 			info->fix.smem_start = xe_ttm_stolen_io_offset(obj, 0);
 		else
 			info->fix.smem_start =
...
@@ -45,8 +45,8 @@ bool intel_dsb_buffer_create(struct intel_crtc *crtc, struct intel_dsb_buffer *d
 	obj = xe_bo_create_pin_map(i915, xe_device_get_root_tile(i915),
 				   NULL, PAGE_ALIGN(size),
 				   ttm_bo_type_kernel,
-				   XE_BO_CREATE_VRAM_IF_DGFX(xe_device_get_root_tile(i915)) |
-				   XE_BO_CREATE_GGTT_BIT);
+				   XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(i915)) |
+				   XE_BO_FLAG_GGTT);
 	if (IS_ERR(obj)) {
 		kfree(vma);
 		return false;
...
@@ -99,21 +99,21 @@ static int __xe_pin_fb_vma_dpt(struct intel_framebuffer *fb,
 	if (IS_DGFX(xe))
 		dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size,
 					   ttm_bo_type_kernel,
-					   XE_BO_CREATE_VRAM0_BIT |
-					   XE_BO_CREATE_GGTT_BIT |
-					   XE_BO_PAGETABLE);
+					   XE_BO_FLAG_VRAM0 |
+					   XE_BO_FLAG_GGTT |
+					   XE_BO_FLAG_PAGETABLE);
 	else
 		dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size,
 					   ttm_bo_type_kernel,
-					   XE_BO_CREATE_STOLEN_BIT |
-					   XE_BO_CREATE_GGTT_BIT |
-					   XE_BO_PAGETABLE);
+					   XE_BO_FLAG_STOLEN |
+					   XE_BO_FLAG_GGTT |
+					   XE_BO_FLAG_PAGETABLE);
 	if (IS_ERR(dpt))
 		dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size,
 					   ttm_bo_type_kernel,
-					   XE_BO_CREATE_SYSTEM_BIT |
-					   XE_BO_CREATE_GGTT_BIT |
-					   XE_BO_PAGETABLE);
+					   XE_BO_FLAG_SYSTEM |
+					   XE_BO_FLAG_GGTT |
+					   XE_BO_FLAG_PAGETABLE);
 	if (IS_ERR(dpt))
 		return PTR_ERR(dpt);
@@ -262,7 +262,7 @@ static struct i915_vma *__xe_pin_fb_vma(struct intel_framebuffer *fb,
 	if (IS_DGFX(to_xe_device(bo->ttm.base.dev)) &&
 	    intel_fb_rc_ccs_cc_plane(&fb->base) >= 0 &&
-	    !(bo->flags & XE_BO_NEEDS_CPU_ACCESS)) {
+	    !(bo->flags & XE_BO_FLAG_NEEDS_CPU_ACCESS)) {
 		struct xe_tile *tile = xe_device_get_root_tile(xe);
 		/*
@@ -355,7 +355,7 @@ int intel_plane_pin_fb(struct intel_plane_state *plane_state)
 	struct i915_vma *vma;
 	/* We reject creating !SCANOUT fb's, so this is weird.. */
-	drm_WARN_ON(bo->ttm.base.dev, !(bo->flags & XE_BO_SCANOUT_BIT));
+	drm_WARN_ON(bo->ttm.base.dev, !(bo->flags & XE_BO_FLAG_SCANOUT));
 	vma = __xe_pin_fb_vma(to_intel_framebuffer(fb), &plane_state->view.gtt);
 	if (IS_ERR(vma))
...
@@ -73,8 +73,8 @@ static int intel_hdcp_gsc_initialize_message(struct xe_device *xe,
 	xe_device_mem_access_get(xe);
 	bo = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe), NULL, PAGE_SIZE * 2,
 				  ttm_bo_type_kernel,
-				  XE_BO_CREATE_SYSTEM_BIT |
-				  XE_BO_CREATE_GGTT_BIT);
+				  XE_BO_FLAG_SYSTEM |
+				  XE_BO_FLAG_GGTT);
 	if (IS_ERR(bo)) {
 		drm_err(&xe->drm, "Failed to allocate bo for HDCP streaming command!\n");
...
@@ -62,7 +62,7 @@ initial_plane_bo(struct xe_device *xe,
 	if (plane_config->size == 0)
 		return NULL;
-	flags = XE_BO_CREATE_PINNED_BIT | XE_BO_SCANOUT_BIT | XE_BO_CREATE_GGTT_BIT;
+	flags = XE_BO_FLAG_PINNED | XE_BO_FLAG_SCANOUT | XE_BO_FLAG_GGTT;
 	base = round_down(plane_config->base, page_size);
 	if (IS_DGFX(xe)) {
@@ -79,7 +79,7 @@ initial_plane_bo(struct xe_device *xe,
 		}
 		phys_base = pte & ~(page_size - 1);
-		flags |= XE_BO_CREATE_VRAM0_BIT;
+		flags |= XE_BO_FLAG_VRAM0;
 		/*
 		 * We don't currently expect this to ever be placed in the
@@ -101,7 +101,7 @@ initial_plane_bo(struct xe_device *xe,
 		if (!stolen)
 			return NULL;
 		phys_base = base;
-		flags |= XE_BO_CREATE_STOLEN_BIT;
+		flags |= XE_BO_FLAG_STOLEN;
 		/*
 		 * If the FB is too big, just don't use it since fbdev is not very
...
@@ -116,7 +116,7 @@ static void ccs_test_run_tile(struct xe_device *xe, struct xe_tile *tile,
 	int ret;
 	/* TODO: Sanity check */
-	unsigned int bo_flags = XE_BO_CREATE_VRAM_IF_DGFX(tile);
+	unsigned int bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile);
 	if (IS_DGFX(xe))
 		kunit_info(test, "Testing vram id %u\n", tile->id);
@@ -186,7 +186,7 @@ EXPORT_SYMBOL_IF_KUNIT(xe_ccs_migrate_kunit);
 static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struct kunit *test)
 {
 	struct xe_bo *bo, *external;
-	unsigned int bo_flags = XE_BO_CREATE_VRAM_IF_DGFX(tile);
+	unsigned int bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile);
 	struct xe_vm *vm = xe_migrate_get_vm(xe_device_get_root_tile(xe)->migrate);
 	struct xe_gt *__gt;
 	int err, i, id;
...
@@ -36,14 +36,14 @@ static void check_residency(struct kunit *test, struct xe_bo *exported,
 	xe_bo_assert_held(imported);
 	mem_type = XE_PL_VRAM0;
-	if (!(params->mem_mask & XE_BO_CREATE_VRAM0_BIT))
+	if (!(params->mem_mask & XE_BO_FLAG_VRAM0))
 		/* No VRAM allowed */
 		mem_type = XE_PL_TT;
 	else if (params->force_different_devices && !p2p_enabled(params))
 		/* No P2P */
 		mem_type = XE_PL_TT;
 	else if (params->force_different_devices && !is_dynamic(params) &&
-		 (params->mem_mask & XE_BO_CREATE_SYSTEM_BIT))
+		 (params->mem_mask & XE_BO_FLAG_SYSTEM))
 		/* Pin migrated to TT */
 		mem_type = XE_PL_TT;
@@ -93,7 +93,7 @@ static void check_residency(struct kunit *test, struct xe_bo *exported,
 	 * possible, saving a migration step as the transfer is just
 	 * likely as fast from system memory.
 	 */
-	if (params->mem_mask & XE_BO_CREATE_SYSTEM_BIT)
+	if (params->mem_mask & XE_BO_FLAG_SYSTEM)
 		KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, XE_PL_TT));
 	else
 		KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, mem_type));
@@ -115,11 +115,11 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
 	/* No VRAM on this device? */
 	if (!ttm_manager_type(&xe->ttm, XE_PL_VRAM0) &&
-	    (params->mem_mask & XE_BO_CREATE_VRAM0_BIT))
+	    (params->mem_mask & XE_BO_FLAG_VRAM0))
 		return;
 	size = PAGE_SIZE;
-	if ((params->mem_mask & XE_BO_CREATE_VRAM0_BIT) &&
+	if ((params->mem_mask & XE_BO_FLAG_VRAM0) &&
 	    xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
 		size = SZ_64K;
@@ -148,7 +148,7 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
 	 */
 	if (params->force_different_devices &&
 	    !p2p_enabled(params) &&
-	    !(params->mem_mask & XE_BO_CREATE_SYSTEM_BIT)) {
+	    !(params->mem_mask & XE_BO_FLAG_SYSTEM)) {
 		KUNIT_FAIL(test,
 			   "xe_gem_prime_import() succeeded when it shouldn't have\n");
 	} else {
@@ -161,7 +161,7 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
 		/* Pinning in VRAM is not allowed. */
 		if (!is_dynamic(params) &&
 		    params->force_different_devices &&
-		    !(params->mem_mask & XE_BO_CREATE_SYSTEM_BIT))
+		    !(params->mem_mask & XE_BO_FLAG_SYSTEM))
 			KUNIT_EXPECT_EQ(test, err, -EINVAL);
 		/* Otherwise only expect interrupts or success. */
 		else if (err && err != -EINTR && err != -ERESTARTSYS)
@@ -180,7 +180,7 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
 			   PTR_ERR(import));
 	} else if (!params->force_different_devices ||
 		   p2p_enabled(params) ||
-		   (params->mem_mask & XE_BO_CREATE_SYSTEM_BIT)) {
+		   (params->mem_mask & XE_BO_FLAG_SYSTEM)) {
 		/* Shouldn't fail if we can reuse same bo, use p2p or use system */
 		KUNIT_FAIL(test, "dynamic p2p attachment failed with err=%ld\n",
 			   PTR_ERR(import));
@@ -203,52 +203,52 @@ static const struct dma_buf_attach_ops nop2p_attach_ops = {
  * gem object.
  */
 static const struct dma_buf_test_params test_params[] = {
-	{.mem_mask = XE_BO_CREATE_VRAM0_BIT,
+	{.mem_mask = XE_BO_FLAG_VRAM0,
 	 .attach_ops = &xe_dma_buf_attach_ops},
-	{.mem_mask = XE_BO_CREATE_VRAM0_BIT,
+	{.mem_mask = XE_BO_FLAG_VRAM0,
 	 .attach_ops = &xe_dma_buf_attach_ops,
 	 .force_different_devices = true},
-	{.mem_mask = XE_BO_CREATE_VRAM0_BIT,
+	{.mem_mask = XE_BO_FLAG_VRAM0,
 	 .attach_ops = &nop2p_attach_ops},
-	{.mem_mask = XE_BO_CREATE_VRAM0_BIT,
+	{.mem_mask = XE_BO_FLAG_VRAM0,
 	 .attach_ops = &nop2p_attach_ops,
 	 .force_different_devices = true},
-	{.mem_mask = XE_BO_CREATE_VRAM0_BIT},
-	{.mem_mask = XE_BO_CREATE_VRAM0_BIT,
+	{.mem_mask = XE_BO_FLAG_VRAM0},
+	{.mem_mask = XE_BO_FLAG_VRAM0,
 	 .force_different_devices = true},
-	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
+	{.mem_mask = XE_BO_FLAG_SYSTEM,
 	 .attach_ops = &xe_dma_buf_attach_ops},
-	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
+	{.mem_mask = XE_BO_FLAG_SYSTEM,
 	 .attach_ops = &xe_dma_buf_attach_ops,
 	 .force_different_devices = true},
-	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
+	{.mem_mask = XE_BO_FLAG_SYSTEM,
 	 .attach_ops = &nop2p_attach_ops},
-	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
+	{.mem_mask = XE_BO_FLAG_SYSTEM,
 	 .attach_ops = &nop2p_attach_ops,
 	 .force_different_devices = true},
-	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT},
-	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
+	{.mem_mask = XE_BO_FLAG_SYSTEM},
+	{.mem_mask = XE_BO_FLAG_SYSTEM,
 	 .force_different_devices = true},
-	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
+	{.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
 	 .attach_ops = &xe_dma_buf_attach_ops},
-	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
+	{.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
 	 .attach_ops = &xe_dma_buf_attach_ops,
 	 .force_different_devices = true},
-	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
+	{.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
 	 .attach_ops = &nop2p_attach_ops},
-	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
+	{.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
 	 .attach_ops = &nop2p_attach_ops,
 	 .force_different_devices = true},
-	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT},
-	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
+	{.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0},
+	{.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
 	 .force_different_devices = true},
 	{}
...
@@ -113,7 +113,7 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
 					   bo->size,
 					   ttm_bo_type_kernel,
 					   region |
-					   XE_BO_NEEDS_CPU_ACCESS);
+					   XE_BO_FLAG_NEEDS_CPU_ACCESS);
 	if (IS_ERR(remote)) {
 		KUNIT_FAIL(test, "Failed to allocate remote bo for %s: %li\n",
 			   str, PTR_ERR(remote));
@@ -191,7 +191,7 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
 static void test_copy_sysmem(struct xe_migrate *m, struct xe_bo *bo,
 			     struct kunit *test)
 {
-	test_copy(m, bo, test, XE_BO_CREATE_SYSTEM_BIT);
+	test_copy(m, bo, test, XE_BO_FLAG_SYSTEM);
 }
 static void test_copy_vram(struct xe_migrate *m, struct xe_bo *bo,
@@ -203,9 +203,9 @@ static void test_copy_vram(struct xe_migrate *m, struct xe_bo *bo,
 		return;
 	if (bo->ttm.resource->mem_type == XE_PL_VRAM0)
-		region = XE_BO_CREATE_VRAM1_BIT;
+		region = XE_BO_FLAG_VRAM1;
 	else
-		region = XE_BO_CREATE_VRAM0_BIT;
+		region = XE_BO_FLAG_VRAM0;
 	test_copy(m, bo, test, region);
 }
@@ -281,8 +281,8 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
 	big = xe_bo_create_pin_map(xe, tile, m->q->vm, SZ_4M,
 				   ttm_bo_type_kernel,
-				   XE_BO_CREATE_VRAM_IF_DGFX(tile) |
-				   XE_BO_CREATE_PINNED_BIT);
+				   XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+				   XE_BO_FLAG_PINNED);
 	if (IS_ERR(big)) {
 		KUNIT_FAIL(test, "Failed to allocate bo: %li\n", PTR_ERR(big));
 		goto vunmap;
@@ -290,8 +290,8 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
 	pt = xe_bo_create_pin_map(xe, tile, m->q->vm, XE_PAGE_SIZE,
 				  ttm_bo_type_kernel,
-				  XE_BO_CREATE_VRAM_IF_DGFX(tile) |
-				  XE_BO_CREATE_PINNED_BIT);
+				  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+				  XE_BO_FLAG_PINNED);
 	if (IS_ERR(pt)) {
 		KUNIT_FAIL(test, "Failed to allocate fake pt: %li\n",
 			   PTR_ERR(pt));
@@ -301,8 +301,8 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
 	tiny = xe_bo_create_pin_map(xe, tile, m->q->vm,
 				    2 * SZ_4K,
 				    ttm_bo_type_kernel,
-				    XE_BO_CREATE_VRAM_IF_DGFX(tile) |
-				    XE_BO_CREATE_PINNED_BIT);
+				    XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+				    XE_BO_FLAG_PINNED);
 	if (IS_ERR(tiny)) {
 		KUNIT_FAIL(test, "Failed to allocate fake pt: %li\n",
 			   PTR_ERR(pt));
...
(One file's diff is collapsed and not shown here.)
@@ -23,33 +23,32 @@
 #define XE_DEFAULT_GTT_SIZE_MB		3072ULL /* 3GB by default */
-#define XE_BO_CREATE_USER_BIT			BIT(0)
+#define XE_BO_FLAG_USER				BIT(0)
 /* The bits below need to be contiguous, or things break */
-#define XE_BO_CREATE_SYSTEM_BIT			BIT(1)
-#define XE_BO_CREATE_VRAM0_BIT			BIT(2)
-#define XE_BO_CREATE_VRAM1_BIT			BIT(3)
-#define XE_BO_CREATE_VRAM_MASK			(XE_BO_CREATE_VRAM0_BIT | \
-						 XE_BO_CREATE_VRAM1_BIT)
+#define XE_BO_FLAG_SYSTEM			BIT(1)
+#define XE_BO_FLAG_VRAM0			BIT(2)
+#define XE_BO_FLAG_VRAM1			BIT(3)
+#define XE_BO_FLAG_VRAM_MASK			(XE_BO_FLAG_VRAM0 | XE_BO_FLAG_VRAM1)
 /* -- */
-#define XE_BO_CREATE_STOLEN_BIT			BIT(4)
-#define XE_BO_CREATE_VRAM_IF_DGFX(tile) \
-	(IS_DGFX(tile_to_xe(tile)) ? XE_BO_CREATE_VRAM0_BIT << (tile)->id : \
-	 XE_BO_CREATE_SYSTEM_BIT)
-#define XE_BO_CREATE_GGTT_BIT			BIT(5)
-#define XE_BO_CREATE_IGNORE_MIN_PAGE_SIZE_BIT	BIT(6)
-#define XE_BO_CREATE_PINNED_BIT			BIT(7)
-#define XE_BO_CREATE_NO_RESV_EVICT		BIT(8)
-#define XE_BO_DEFER_BACKING			BIT(9)
-#define XE_BO_SCANOUT_BIT			BIT(10)
-#define XE_BO_FIXED_PLACEMENT_BIT		BIT(11)
-#define XE_BO_PAGETABLE				BIT(12)
-#define XE_BO_NEEDS_CPU_ACCESS			BIT(13)
-#define XE_BO_NEEDS_UC				BIT(14)
+#define XE_BO_FLAG_STOLEN			BIT(4)
+#define XE_BO_FLAG_VRAM_IF_DGFX(tile)		(IS_DGFX(tile_to_xe(tile)) ? \
						 XE_BO_FLAG_VRAM0 << (tile)->id : \
						 XE_BO_FLAG_SYSTEM)
+#define XE_BO_FLAG_GGTT				BIT(5)
+#define XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE		BIT(6)
+#define XE_BO_FLAG_PINNED			BIT(7)
+#define XE_BO_FLAG_NO_RESV_EVICT		BIT(8)
+#define XE_BO_FLAG_DEFER_BACKING		BIT(9)
+#define XE_BO_FLAG_SCANOUT			BIT(10)
+#define XE_BO_FLAG_FIXED_PLACEMENT		BIT(11)
+#define XE_BO_FLAG_PAGETABLE			BIT(12)
+#define XE_BO_FLAG_NEEDS_CPU_ACCESS		BIT(13)
+#define XE_BO_FLAG_NEEDS_UC			BIT(14)
 #define XE_BO_NEEDS_64K				BIT(15)
-#define XE_BO_GGTT_INVALIDATE			BIT(16)
+#define XE_BO_FLAG_GGTT_INVALIDATE		BIT(16)
 /* this one is trigger internally only */
-#define XE_BO_INTERNAL_TEST			BIT(30)
-#define XE_BO_INTERNAL_64K			BIT(31)
+#define XE_BO_FLAG_INTERNAL_TEST		BIT(30)
+#define XE_BO_FLAG_INTERNAL_64K			BIT(31)
 #define XELPG_PPGTT_PTE_PAT3			BIT_ULL(62)
 #define XE2_PPGTT_PTE_PAT4			BIT_ULL(61)
...
@@ -146,7 +146,7 @@ int xe_bo_restore_kernel(struct xe_device *xe)
 			return ret;
 		}
-		if (bo->flags & XE_BO_CREATE_GGTT_BIT) {
+		if (bo->flags & XE_BO_FLAG_GGTT) {
 			struct xe_tile *tile = bo->tile;
 			mutex_lock(&tile->mem.ggtt->lock);
...
@@ -217,7 +217,7 @@ xe_dma_buf_init_obj(struct drm_device *dev, struct xe_bo *storage,
 	dma_resv_lock(resv, NULL);
 	bo = ___xe_bo_create_locked(xe, storage, NULL, resv, NULL, dma_buf->size,
 				    0, /* Will require 1way or 2way for vm_bind */
-				    ttm_bo_type_sg, XE_BO_CREATE_SYSTEM_BIT);
+				    ttm_bo_type_sg, XE_BO_FLAG_SYSTEM);
 	if (IS_ERR(bo)) {
 		ret = PTR_ERR(bo);
 		goto error;
...
@@ -224,11 +224,11 @@ int xe_ggtt_init(struct xe_ggtt *ggtt)
 	 * scratch entires, rather keep the scratch page in system memory on
 	 * platforms where 64K pages are needed for VRAM.
 	 */
-	flags = XE_BO_CREATE_PINNED_BIT;
+	flags = XE_BO_FLAG_PINNED;
 	if (ggtt->flags & XE_GGTT_FLAGS_64K)
-		flags |= XE_BO_CREATE_SYSTEM_BIT;
+		flags |= XE_BO_FLAG_SYSTEM;
 	else
-		flags |= XE_BO_CREATE_VRAM_IF_DGFX(ggtt->tile);
+		flags |= XE_BO_FLAG_VRAM_IF_DGFX(ggtt->tile);
 	ggtt->scratch = xe_managed_bo_create_pin_map(xe, ggtt->tile, XE_PAGE_SIZE, flags);
 	if (IS_ERR(ggtt->scratch)) {
@@ -375,7 +375,7 @@ int xe_ggtt_insert_special_node(struct xe_ggtt *ggtt, struct drm_mm_node *node,
 void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
 {
-	u16 cache_mode = bo->flags & XE_BO_NEEDS_UC ? XE_CACHE_NONE : XE_CACHE_WB;
+	u16 cache_mode = bo->flags & XE_BO_FLAG_NEEDS_UC ? XE_CACHE_NONE : XE_CACHE_WB;
 	u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[cache_mode];
 	u64 start = bo->ggtt_node.start;
 	u64 offset, pte;
@@ -413,7 +413,7 @@ static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
 	xe_ggtt_map_bo(ggtt, bo);
 	mutex_unlock(&ggtt->lock);
-	if (!err && bo->flags & XE_BO_GGTT_INVALIDATE)
+	if (!err && bo->flags & XE_BO_FLAG_GGTT_INVALIDATE)
 		xe_ggtt_invalidate(ggtt);
 	xe_device_mem_access_put(tile_to_xe(ggtt->tile));
@@ -457,7 +457,7 @@ void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
 	xe_tile_assert(ggtt->tile, bo->ggtt_node.size == bo->size);
 	xe_ggtt_remove_node(ggtt, &bo->ggtt_node,
-			    bo->flags & XE_BO_GGTT_INVALIDATE);
+			    bo->flags & XE_BO_FLAG_GGTT_INVALIDATE);
 }
 int xe_ggtt_dump(struct xe_ggtt *ggtt, struct drm_printer *p)
...
@@ -130,8 +130,8 @@ static int query_compatibility_version(struct xe_gsc *gsc)
 	bo = xe_bo_create_pin_map(xe, tile, NULL, GSC_VER_PKT_SZ * 2,
 				  ttm_bo_type_kernel,
-				  XE_BO_CREATE_SYSTEM_BIT |
-				  XE_BO_CREATE_GGTT_BIT);
+				  XE_BO_FLAG_SYSTEM |
+				  XE_BO_FLAG_GGTT);
 	if (IS_ERR(bo)) {
 		xe_gt_err(gt, "failed to allocate bo for GSC version query\n");
 		return PTR_ERR(bo);
@@ -468,8 +468,8 @@ int xe_gsc_init_post_hwconfig(struct xe_gsc *gsc)
 	bo = xe_bo_create_pin_map(xe, tile, NULL, SZ_4M,
 				  ttm_bo_type_kernel,
-				  XE_BO_CREATE_STOLEN_BIT |
-				  XE_BO_CREATE_GGTT_BIT);
+				  XE_BO_FLAG_STOLEN |
+				  XE_BO_FLAG_GGTT);
 	if (IS_ERR(bo))
 		return PTR_ERR(bo);
...
@@ -411,8 +411,8 @@ static int proxy_channel_alloc(struct xe_gsc *gsc)
 	bo = xe_bo_create_pin_map(xe, tile, NULL, GSC_PROXY_CHANNEL_SIZE,
 				  ttm_bo_type_kernel,
-				  XE_BO_CREATE_SYSTEM_BIT |
-				  XE_BO_CREATE_GGTT_BIT);
+				  XE_BO_FLAG_SYSTEM |
+				  XE_BO_FLAG_GGTT);
 	if (IS_ERR(bo)) {
 		kfree(csme);
 		return PTR_ERR(bo);
...
@@ -273,9 +273,9 @@ int xe_guc_ads_init(struct xe_guc_ads *ads)
 	ads->regset_size = calculate_regset_size(gt);
 	bo = xe_managed_bo_create_pin_map(xe, tile, guc_ads_size(ads) + MAX_GOLDEN_LRC_SIZE,
-					  XE_BO_CREATE_SYSTEM_BIT |
-					  XE_BO_CREATE_GGTT_BIT |
-					  XE_BO_GGTT_INVALIDATE);
+					  XE_BO_FLAG_SYSTEM |
+					  XE_BO_FLAG_GGTT |
+					  XE_BO_FLAG_GGTT_INVALIDATE);
 	if (IS_ERR(bo))
 		return PTR_ERR(bo);
...
@@ -159,9 +159,9 @@ int xe_guc_ct_init(struct xe_guc_ct *ct)
 	primelockdep(ct);
 	bo = xe_managed_bo_create_pin_map(xe, tile, guc_ct_size(),
-					  XE_BO_CREATE_SYSTEM_BIT |
-					  XE_BO_CREATE_GGTT_BIT |
-					  XE_BO_GGTT_INVALIDATE);
+					  XE_BO_FLAG_SYSTEM |
+					  XE_BO_FLAG_GGTT |
+					  XE_BO_FLAG_GGTT_INVALIDATE);
 	if (IS_ERR(bo))
 		return PTR_ERR(bo);
...
@@ -78,9 +78,9 @@ int xe_guc_hwconfig_init(struct xe_guc *guc)
 		return -EINVAL;
 	bo = xe_managed_bo_create_pin_map(xe, tile, PAGE_ALIGN(size),
-					  XE_BO_CREATE_SYSTEM_BIT |
-					  XE_BO_CREATE_GGTT_BIT |
-					  XE_BO_GGTT_INVALIDATE);
+					  XE_BO_FLAG_SYSTEM |
+					  XE_BO_FLAG_GGTT |
+					  XE_BO_FLAG_GGTT_INVALIDATE);
 	if (IS_ERR(bo))
 		return PTR_ERR(bo);
 	guc->hwconfig.bo = bo;
...
@@ -84,9 +84,9 @@ int xe_guc_log_init(struct xe_guc_log *log)
 	struct xe_bo *bo;
 	bo = xe_managed_bo_create_pin_map(xe, tile, guc_log_size(),
-					  XE_BO_CREATE_SYSTEM_BIT |
-					  XE_BO_CREATE_GGTT_BIT |
-					  XE_BO_GGTT_INVALIDATE);
+					  XE_BO_FLAG_SYSTEM |
+					  XE_BO_FLAG_GGTT |
+					  XE_BO_FLAG_GGTT_INVALIDATE);
 	if (IS_ERR(bo))
 		return PTR_ERR(bo);
...
@@ -929,9 +929,9 @@ int xe_guc_pc_init(struct xe_guc_pc *pc)
 		return err;
 	bo = xe_managed_bo_create_pin_map(xe, tile, size,
-					  XE_BO_CREATE_VRAM_IF_DGFX(tile) |
-					  XE_BO_CREATE_GGTT_BIT |
-					  XE_BO_GGTT_INVALIDATE);
+					  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+					  XE_BO_FLAG_GGTT |
+					  XE_BO_FLAG_GGTT_INVALIDATE);
 	if (IS_ERR(bo))
 		return PTR_ERR(bo);
...
@@ -59,8 +59,8 @@ static int huc_alloc_gsc_pkt(struct xe_huc *huc)
 	bo = xe_bo_create_pin_map(xe, gt_to_tile(gt), NULL,
 				  PXP43_HUC_AUTH_INOUT_SIZE * 2,
 				  ttm_bo_type_kernel,
-				  XE_BO_CREATE_SYSTEM_BIT |
-				  XE_BO_CREATE_GGTT_BIT);
+				  XE_BO_FLAG_SYSTEM |
+				  XE_BO_FLAG_GGTT);
 	if (IS_ERR(bo))
 		return PTR_ERR(bo);
...
@@ -518,9 +518,9 @@ static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe,
 	xe_reg_sr_apply_whitelist(hwe);
 	hwe->hwsp = xe_managed_bo_create_pin_map(xe, tile, SZ_4K,
-						 XE_BO_CREATE_VRAM_IF_DGFX(tile) |
-						 XE_BO_CREATE_GGTT_BIT |
-						 XE_BO_GGTT_INVALIDATE);
+						 XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+						 XE_BO_FLAG_GGTT |
+						 XE_BO_FLAG_GGTT_INVALIDATE);
 	if (IS_ERR(hwe->hwsp)) {
 		err = PTR_ERR(hwe->hwsp);
 		goto err_name;
...
@@ -70,8 +70,8 @@ static struct xe_lmtt_pt *lmtt_pt_alloc(struct xe_lmtt *lmtt, unsigned int level
 				     PAGE_ALIGN(lmtt->ops->lmtt_pte_size(level) *
 						lmtt->ops->lmtt_pte_num(level)),
 				     ttm_bo_type_kernel,
-				     XE_BO_CREATE_VRAM_IF_DGFX(lmtt_to_tile(lmtt)) |
-				     XE_BO_NEEDS_64K | XE_BO_CREATE_PINNED_BIT);
+				     XE_BO_FLAG_VRAM_IF_DGFX(lmtt_to_tile(lmtt)) |
+				     XE_BO_NEEDS_64K | XE_BO_FLAG_PINNED);
 	if (IS_ERR(bo)) {
 		err = PTR_ERR(bo);
 		goto out_free_pt;
...
@@ -743,9 +743,9 @@ int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
 	lrc->bo = xe_bo_create_pin_map(xe, tile, vm,
 				       ring_size + xe_lrc_size(xe, hwe->class),
 				       ttm_bo_type_kernel,
-				       XE_BO_CREATE_VRAM_IF_DGFX(tile) |
-				       XE_BO_CREATE_GGTT_BIT |
-				       XE_BO_GGTT_INVALIDATE);
+				       XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+				       XE_BO_FLAG_GGTT |
+				       XE_BO_FLAG_GGTT_INVALIDATE);
 	if (IS_ERR(lrc->bo))
 		return PTR_ERR(lrc->bo);
...
@@ -127,11 +127,11 @@ static int memirq_alloc_pages(struct xe_memirq *memirq)
 	/* XXX: convert to managed bo */
 	bo = xe_bo_create_pin_map(xe, tile, NULL, SZ_4K,
 				  ttm_bo_type_kernel,
-				  XE_BO_CREATE_SYSTEM_BIT |
-				  XE_BO_CREATE_GGTT_BIT |
-				  XE_BO_GGTT_INVALIDATE |
-				  XE_BO_NEEDS_UC |
-				  XE_BO_NEEDS_CPU_ACCESS);
+				  XE_BO_FLAG_SYSTEM |
+				  XE_BO_FLAG_GGTT |
+				  XE_BO_FLAG_GGTT_INVALIDATE |
+				  XE_BO_FLAG_NEEDS_UC |
+				  XE_BO_FLAG_NEEDS_CPU_ACCESS);
 	if (IS_ERR(bo)) {
 		err = PTR_ERR(bo);
 		goto out;
...
@@ -155,8 +155,8 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
 	bo = xe_bo_create_pin_map(vm->xe, tile, vm,
 				  num_entries * XE_PAGE_SIZE,
 				  ttm_bo_type_kernel,
-				  XE_BO_CREATE_VRAM_IF_DGFX(tile) |
-				  XE_BO_CREATE_PINNED_BIT);
+				  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+				  XE_BO_FLAG_PINNED);
 	if (IS_ERR(bo))
 		return PTR_ERR(bo);
...
@@ -108,11 +108,11 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
 	pt->level = level;
 	bo = xe_bo_create_pin_map(vm->xe, tile, vm, SZ_4K,
 				  ttm_bo_type_kernel,
-				  XE_BO_CREATE_VRAM_IF_DGFX(tile) |
-				  XE_BO_CREATE_IGNORE_MIN_PAGE_SIZE_BIT |
-				  XE_BO_CREATE_PINNED_BIT |
-				  XE_BO_CREATE_NO_RESV_EVICT |
-				  XE_BO_PAGETABLE);
+				  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+				  XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE |
+				  XE_BO_FLAG_PINNED |
+				  XE_BO_FLAG_NO_RESV_EVICT |
+				  XE_BO_FLAG_PAGETABLE);
 	if (IS_ERR(bo)) {
 		err = PTR_ERR(bo);
 		goto err_kfree;
...
@@ -48,9 +48,9 @@ struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32
 	sa_manager->bo = NULL;
 	bo = xe_bo_create_pin_map(xe, tile, NULL, size, ttm_bo_type_kernel,
-				  XE_BO_CREATE_VRAM_IF_DGFX(tile) |
-				  XE_BO_CREATE_GGTT_BIT |
-				  XE_BO_GGTT_INVALIDATE);
+				  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+				  XE_BO_FLAG_GGTT |
+				  XE_BO_FLAG_GGTT_INVALIDATE);
 	if (IS_ERR(bo)) {
 		drm_err(&xe->drm, "failed to allocate bo for sa manager: %ld\n",
 			PTR_ERR(bo));
...
@@ -303,7 +303,7 @@ static int __xe_ttm_stolen_io_mem_reserve_stolen(struct xe_device *xe,
 	XE_WARN_ON(IS_DGFX(xe));
 	/* XXX: Require BO to be mapped to GGTT? */
-	if (drm_WARN_ON(&xe->drm, !(bo->flags & XE_BO_CREATE_GGTT_BIT)))
+	if (drm_WARN_ON(&xe->drm, !(bo->flags & XE_BO_FLAG_GGTT)))
 		return -EIO;
 	/* GGTT is always contiguously mapped */
...
@@ -763,8 +763,8 @@ int xe_uc_fw_init(struct xe_uc_fw *uc_fw)
 		return 0;
 	err = uc_fw_copy(uc_fw, fw->data, fw->size,
-			 XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_GGTT_BIT |
-			 XE_BO_GGTT_INVALIDATE);
+			 XE_BO_FLAG_SYSTEM | XE_BO_FLAG_GGTT |
+			 XE_BO_FLAG_GGTT_INVALIDATE);
 	uc_fw_release(fw);
...
@@ -3069,7 +3069,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 			goto put_obj;
 		}
-		if (bos[i]->flags & XE_BO_INTERNAL_64K) {
+		if (bos[i]->flags & XE_BO_FLAG_INTERNAL_64K) {
 			if (XE_IOCTL_DBG(xe, obj_offset &
 					 XE_64K_PAGE_MASK) ||
 			    XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) ||
...