Commit 845f64bd authored by Thomas Hellström, committed by Rodrigo Vivi

drm/xe: Introduce a range-fence utility

Add a generic utility to track range conflicts signaled by a dma-fence.
Tracking is implemented via an interval tree. An example use case is
tracking conflicts for pending (un)binds from multiple bind engines. By
being generic, this could be moved to the DRM level and used by multiple
drivers for similar problems.

v2: Make interval tree functions static (CI)
v3: Remove non-static cleanup function (CI)
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 0043a3e8
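As a minimal usage sketch of the API this patch adds (not part of the patch
itself; track_pending_bind() and its arguments are hypothetical caller state):
a caller allocates a range fence, inserts it over the VA range a pending bind
touches, and hands ownership to the tree, which retires and frees the entry
once the fence signals.

/*
 * Minimal usage sketch, assuming a caller holding whatever lock serializes
 * access to @tree. The function name is illustrative, not from this patch.
 */
static int track_pending_bind(struct xe_range_fence_tree *tree,
                              u64 start, u64 last, struct dma_fence *fence)
{
        struct xe_range_fence *rfence = kzalloc(sizeof(*rfence), GFP_KERNEL);

        if (!rfence)
                return -ENOMEM;

        /*
         * Ownership of rfence passes to the tree: it is kfree()d via
         * xe_range_fence_kfree_ops when @fence signals, or immediately if
         * @fence is already signaled or insertion fails.
         */
        return xe_range_fence_insert(tree, rfence, &xe_range_fence_kfree_ops,
                                     start, last, fence);
}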
drivers/gpu/drm/xe/Makefile
@@ -95,6 +95,7 @@ xe-y += xe_bb.o \
xe_pt.o \
xe_pt_walk.o \
xe_query.o \
xe_range_fence.o \
xe_reg_sr.o \
xe_reg_whitelist.o \
xe_rtp.o \
drivers/gpu/drm/xe/xe_range_fence.c
// SPDX-License-Identifier: MIT
/*
* Copyright © 2023 Intel Corporation
*/
#include <linux/dma-fence.h>
#include <linux/interval_tree_generic.h>
#include <linux/slab.h>

#include "xe_macros.h"
#include "xe_range_fence.h"

#define XE_RANGE_TREE_START(_node) ((_node)->start)
#define XE_RANGE_TREE_LAST(_node) ((_node)->last)

INTERVAL_TREE_DEFINE(struct xe_range_fence, rb, u64, __subtree_last,
                     XE_RANGE_TREE_START, XE_RANGE_TREE_LAST, static,
                     xe_range_fence_tree);

static void
xe_range_fence_signal_notify(struct dma_fence *fence, struct dma_fence_cb *cb)
{
        struct xe_range_fence *rfence = container_of(cb, typeof(*rfence), cb);
        struct xe_range_fence_tree *tree = rfence->tree;

        llist_add(&rfence->link, &tree->list);
}
static bool __xe_range_fence_tree_cleanup(struct xe_range_fence_tree *tree)
{
        struct llist_node *node = llist_del_all(&tree->list);
        struct xe_range_fence *rfence, *next;

        llist_for_each_entry_safe(rfence, next, node, link) {
                xe_range_fence_tree_remove(rfence, &tree->root);
                dma_fence_put(rfence->fence);
                kfree(rfence);
        }

        return !!node;
}
/**
 * xe_range_fence_insert() - range fence insert
 * @tree: range fence tree to insert into
 * @rfence: range fence
 * @ops: range fence ops
 * @start: start address of range fence
 * @last: last address of range fence
 * @fence: dma fence whose signaling allows the range fence to be removed
 * and freed
 *
 * Return: 0 on success, non-zero on failure
 */
int xe_range_fence_insert(struct xe_range_fence_tree *tree,
                          struct xe_range_fence *rfence,
                          const struct xe_range_fence_ops *ops,
                          u64 start, u64 last, struct dma_fence *fence)
{
        int err = 0;

        __xe_range_fence_tree_cleanup(tree);

        if (dma_fence_is_signaled(fence))
                goto free;

        rfence->ops = ops;
        rfence->start = start;
        rfence->last = last;
        rfence->tree = tree;
        rfence->fence = dma_fence_get(fence);
        err = dma_fence_add_callback(fence, &rfence->cb,
                                     xe_range_fence_signal_notify);
        if (err == -ENOENT) {
                dma_fence_put(fence);
                err = 0;
                goto free;
        } else if (err == 0) {
                xe_range_fence_tree_insert(rfence, &tree->root);
                return 0;
        }

free:
        if (ops->free)
                ops->free(rfence);

        return err;
}
static void xe_range_fence_tree_remove_all(struct xe_range_fence_tree *tree)
{
        struct xe_range_fence *rfence;
        bool retry = true;

        rfence = xe_range_fence_tree_iter_first(&tree->root, 0, U64_MAX);
        while (rfence) {
                /* Should be ok with the minimalistic callback */
                if (dma_fence_remove_callback(rfence->fence, &rfence->cb))
                        llist_add(&rfence->link, &tree->list);
                rfence = xe_range_fence_tree_iter_next(rfence, 0, U64_MAX);
        }

        while (retry)
                retry = __xe_range_fence_tree_cleanup(tree);
}
/**
 * xe_range_fence_tree_init() - Init range fence tree
 * @tree: range fence tree
 */
void xe_range_fence_tree_init(struct xe_range_fence_tree *tree)
{
        memset(tree, 0, sizeof(*tree));
}

/**
 * xe_range_fence_tree_fini() - Fini range fence tree
 * @tree: range fence tree
 */
void xe_range_fence_tree_fini(struct xe_range_fence_tree *tree)
{
        xe_range_fence_tree_remove_all(tree);
        XE_WARN_ON(!RB_EMPTY_ROOT(&tree->root.rb_root));
}
/**
 * xe_range_fence_tree_first() - range fence tree iterator first
 * @tree: range fence tree
 * @start: start address of range fence
 * @last: last address of range fence
 *
 * Return: first range fence found in range or NULL
 */
struct xe_range_fence *
xe_range_fence_tree_first(struct xe_range_fence_tree *tree, u64 start,
                          u64 last)
{
        return xe_range_fence_tree_iter_first(&tree->root, start, last);
}

/**
 * xe_range_fence_tree_next() - range fence tree iterator next
 * @rfence: current range fence
 * @start: start address of range fence
 * @last: last address of range fence
 *
 * Return: next range fence found in range or NULL
 */
struct xe_range_fence *
xe_range_fence_tree_next(struct xe_range_fence *rfence, u64 start, u64 last)
{
        return xe_range_fence_tree_iter_next(rfence, start, last);
}

const struct xe_range_fence_ops xe_range_fence_kfree_ops = {
        .free = (void (*)(struct xe_range_fence *rfence)) kfree,
};
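A caller checks for conflicts by walking the overlapping entries with the
iterator pair above. A sketch, assuming the caller serializes access to the
tree (the function name is illustrative, not from this patch):

/* Illustrative sketch: wait for every fence overlapping [start, last]. */
static int xe_range_fence_wait_range(struct xe_range_fence_tree *tree,
                                     u64 start, u64 last)
{
        struct xe_range_fence *rfence;

        for (rfence = xe_range_fence_tree_first(tree, start, last); rfence;
             rfence = xe_range_fence_tree_next(rfence, start, last)) {
                long err = dma_fence_wait(rfence->fence, true);

                if (err < 0)
                        return err;
        }

        return 0;
}

Note that the signal callback only queues entries on the llist; actual
removal from the tree is deferred to the next insert or to fini, so a walk
like this never races with removal as long as the tree itself is locked.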
drivers/gpu/drm/xe/xe_range_fence.h
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2023 Intel Corporation
 */

#ifndef _XE_RANGE_FENCE_H_
#define _XE_RANGE_FENCE_H_

#include <linux/dma-fence.h>
#include <linux/llist.h>
#include <linux/rbtree.h>
#include <linux/types.h>
struct xe_range_fence_tree;
struct xe_range_fence;

/** struct xe_range_fence_ops - XE range fence ops */
struct xe_range_fence_ops {
        /** @free: free range fence op */
        void (*free)(struct xe_range_fence *rfence);
};

/** struct xe_range_fence - XE range fence (address conflict tracking) */
struct xe_range_fence {
        /** @rb: RB tree node inserted into interval tree */
        struct rb_node rb;
        /** @start: start address of range fence in interval tree */
        u64 start;
        /** @last: last address (inclusive) of range fence in interval tree */
        u64 last;
        /** @__subtree_last: interval tree internal usage */
        u64 __subtree_last;
        /**
         * @fence: fence whose signaling indicates the address range no
         * longer has a conflict
         */
        struct dma_fence *fence;
        /** @tree: interval tree which range fence belongs to */
        struct xe_range_fence_tree *tree;
        /**
         * @cb: callback run when @fence signals; queues the range fence for
         * deferred removal and freeing from the interval tree
         */
        struct dma_fence_cb cb;
        /** @link: used to defer free of range fence to non-irq context */
        struct llist_node link;
        /** @ops: range fence ops */
        const struct xe_range_fence_ops *ops;
};

/** struct xe_range_fence_tree - interval tree to store range fences */
struct xe_range_fence_tree {
        /** @root: interval tree root */
        struct rb_root_cached root;
        /** @list: list of pending range fences to be freed */
        struct llist_head list;
};

extern const struct xe_range_fence_ops xe_range_fence_kfree_ops;

struct xe_range_fence *
xe_range_fence_tree_first(struct xe_range_fence_tree *tree, u64 start,
                          u64 last);
struct xe_range_fence *
xe_range_fence_tree_next(struct xe_range_fence *rfence, u64 start, u64 last);

void xe_range_fence_tree_init(struct xe_range_fence_tree *tree);
void xe_range_fence_tree_fini(struct xe_range_fence_tree *tree);

int xe_range_fence_insert(struct xe_range_fence_tree *tree,
                          struct xe_range_fence *rfence,
                          const struct xe_range_fence_ops *ops,
                          u64 start, u64 last,
                          struct dma_fence *fence);

#endif
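Lifecycle-wise, a tree is meant to be embedded in a longer-lived object,
initialized once and finalized on teardown. A sketch with a hypothetical
owning structure (a later user, e.g. a VM, might embed it similarly):

/* Hypothetical owner; names are illustrative, not from this patch. */
struct example_vm {
        struct xe_range_fence_tree rftree;
};

static void example_vm_init(struct example_vm *vm)
{
        xe_range_fence_tree_init(&vm->rftree);
}

static void example_vm_fini(struct example_vm *vm)
{
        /* Detaches pending callbacks and frees any remaining range fences. */
        xe_range_fence_tree_fini(&vm->rftree);
}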