Commit 1ad0510c authored by Rob Clark

Merge tag 'dma-fence-deadline' into HEAD

This series adds a deadline hint to fences, so realtime deadlines
such as vblank can be communicated to the fence signaller for power/
frequency management decisions.

This is partially inspired by a trick i915 does, but implemented
via dma-fence for a couple of reasons:

1) To continue to be able to use the atomic helpers
2) To support cases where display and gpu are different drivers

See https://patchwork.freedesktop.org/series/93035/

This does not yet add any UAPI, although this will be needed in
a number of cases:

1) Workloads "ping-ponging" between CPU and GPU, where we don't
   want the GPU freq governor to interpret time stalled waiting
   for GPU as "idle" time
2) Cases where the compositor is waiting for fences to be signaled
   before issuing the atomic ioctl, for example to maintain 60fps
   cursor updates even when the GPU is not able to maintain that
   framerate.
Signed-off-by: Rob Clark <robdclark@chromium.org>
parents 8559da8f d39e48ca
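For context, a rough waiter-side sketch of how the pieces added below fit together. This is not part of the patch: the wrapper function is hypothetical, and only drm_crtc_next_vblank_start(), dma_fence_set_deadline() and the pre-existing dma_fence_wait() are real interfaces.

#include <drm/drm_vblank.h>
#include <linux/dma-fence.h>

/* Hypothetical waiter: hint the signaler with the next vblank time,
 * then wait as usual.  The hint never changes the wait semantics. */
static long example_wait_for_scanout_fence(struct drm_crtc *crtc,
                                           struct dma_fence *fence)
{
        ktime_t deadline;

        /* Ask the vblank machinery when the next scanout window starts... */
        if (drm_crtc_next_vblank_start(crtc, &deadline) == 0)
                /* ...and pass that as a deadline hint to the signaler. */
                dma_fence_set_deadline(fence, deadline);

        return dma_fence_wait(fence, true);
}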
@@ -164,6 +164,12 @@ DMA Fence Signalling Annotations
.. kernel-doc:: drivers/dma-buf/dma-fence.c
   :doc: fence signalling annotation
DMA Fence Deadline Hints
~~~~~~~~~~~~~~~~~~~~~~~~
.. kernel-doc:: drivers/dma-buf/dma-fence.c
   :doc: deadline hints
DMA Fences Functions Reference
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -197,8 +203,8 @@ DMA Fence unwrap
.. kernel-doc:: include/linux/dma-fence-unwrap.h
   :internal:
DMA Fence Sync File
~~~~~~~~~~~~~~~~~~~
.. kernel-doc:: drivers/dma-buf/sync_file.c
   :export:
@@ -206,6 +212,12 @@ DMA Fence uABI/Sync File
.. kernel-doc:: include/linux/sync_file.h
   :internal:
DMA Fence Sync File uABI
~~~~~~~~~~~~~~~~~~~~~~~~
.. kernel-doc:: include/uapi/linux/sync_file.h
   :internal:
Indefinite DMA Fences
~~~~~~~~~~~~~~~~~~~~~
...
@@ -123,12 +123,23 @@ static void dma_fence_array_release(struct dma_fence *fence)
dma_fence_free(fence);
}
static void dma_fence_array_set_deadline(struct dma_fence *fence,
ktime_t deadline)
{
struct dma_fence_array *array = to_dma_fence_array(fence);
unsigned i;
for (i = 0; i < array->num_fences; ++i)
dma_fence_set_deadline(array->fences[i], deadline);
}
const struct dma_fence_ops dma_fence_array_ops = {
.get_driver_name = dma_fence_array_get_driver_name,
.get_timeline_name = dma_fence_array_get_timeline_name,
.enable_signaling = dma_fence_array_enable_signaling,
.signaled = dma_fence_array_signaled,
.release = dma_fence_array_release,
.set_deadline = dma_fence_array_set_deadline,
};
EXPORT_SYMBOL(dma_fence_array_ops);
...
@@ -206,6 +206,17 @@ static void dma_fence_chain_release(struct dma_fence *fence)
dma_fence_free(fence);
}
static void dma_fence_chain_set_deadline(struct dma_fence *fence,
ktime_t deadline)
{
dma_fence_chain_for_each(fence, fence) {
struct dma_fence *f = dma_fence_chain_contained(fence);
dma_fence_set_deadline(f, deadline);
}
}
const struct dma_fence_ops dma_fence_chain_ops = {
.use_64bit_seqno = true,
.get_driver_name = dma_fence_chain_get_driver_name,
@@ -213,6 +224,7 @@ const struct dma_fence_ops dma_fence_chain_ops = {
.enable_signaling = dma_fence_chain_enable_signaling,
.signaled = dma_fence_chain_signaled,
.release = dma_fence_chain_release,
.set_deadline = dma_fence_chain_set_deadline,
};
EXPORT_SYMBOL(dma_fence_chain_ops);
...
@@ -912,6 +912,65 @@ dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
}
EXPORT_SYMBOL(dma_fence_wait_any_timeout);
/**
* DOC: deadline hints
*
* In an ideal world, it would be possible to pipeline a workload sufficiently
* that a utilization based device frequency governor could arrive at a minimum
* frequency that meets the requirements of the use-case, in order to minimize
* power consumption. But in the real world there are many workloads which
* defy this ideal. For example, but not limited to:
*
* * Workloads that ping-pong between device and CPU, with alternating periods
* of CPU waiting for device, and device waiting on CPU. This can result in
* devfreq and cpufreq seeing idle time in their respective domains and, as a
* result, reducing frequency.
*
* Workloads that interact with a periodic time-based deadline, such as double
* buffered GPU rendering vs vblank sync'd page flipping. In this scenario,
* missing a vblank deadline results in an *increase* in idle time on the GPU
* (since it has to wait an additional vblank period), sending a signal to
* the GPU's devfreq to reduce frequency, when in fact the opposite is what is
* needed.
*
* To this end, deadline hint(s) can be set on a &dma_fence via &dma_fence_set_deadline.
* The deadline hint provides a way for the waiting driver, or userspace, to
* convey an appropriate sense of urgency to the signaling driver.
*
* A deadline hint is given in absolute ktime (CLOCK_MONOTONIC for userspace
* facing APIs). The time could either be some point in the future (such as
* the vblank based deadline for page-flipping, or the start of a compositor's
* composition cycle), or the current time to indicate an immediate deadline
* hint (i.e. forward progress cannot be made until this fence is signaled).
*
* Multiple deadlines may be set on a given fence, even in parallel. See the
* documentation for &dma_fence_ops.set_deadline.
*
* The deadline hint is just that, a hint. The driver that created the fence
* may react by increasing frequency, making different scheduling choices, etc.,
* or it may do nothing at all.
*/
/**
* dma_fence_set_deadline - set desired fence-wait deadline hint
* @fence: the fence that is to be waited on
* @deadline: the time by which the waiter hopes for the fence to be
* signaled
*
* Give the fence signaler a hint about an upcoming deadline, such as
* vblank, by which point the waiter would prefer the fence to be
* signaled. This is intended to give feedback to the fence signaler
* to aid in power management decisions, such as boosting GPU frequency
* if a periodic vblank deadline is approaching but the fence is not
* yet signaled.
*/
void dma_fence_set_deadline(struct dma_fence *fence, ktime_t deadline)
{
if (fence->ops->set_deadline && !dma_fence_is_signaled(fence))
fence->ops->set_deadline(fence, deadline);
}
EXPORT_SYMBOL(dma_fence_set_deadline);
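On the signaling side, the expected pattern (see the &dma_fence_ops.set_deadline documentation further down) is to remember only the earliest requested deadline and use it to drive clock/power decisions. A minimal hypothetical sketch, where struct my_fence, my_fence_boost() and the other ops are made-up driver code:

#include <linux/dma-fence.h>
#include <linux/ktime.h>
#include <linux/spinlock.h>

struct my_fence {
        struct dma_fence base;
        spinlock_t lock;
        ktime_t deadline;
        bool has_deadline;
};

/* Provided elsewhere by the (hypothetical) driver: */
static void my_fence_boost(struct my_fence *mf);
static const char *my_fence_get_driver_name(struct dma_fence *f);
static const char *my_fence_get_timeline_name(struct dma_fence *f);

static void my_fence_set_deadline(struct dma_fence *f, ktime_t deadline)
{
        struct my_fence *mf = container_of(f, struct my_fence, base);
        unsigned long flags;

        spin_lock_irqsave(&mf->lock, flags);
        /* Hints can arrive multiple times and in parallel; keep the soonest. */
        if (!mf->has_deadline || ktime_before(deadline, mf->deadline)) {
                mf->deadline = deadline;
                mf->has_deadline = true;
        }
        spin_unlock_irqrestore(&mf->lock, flags);

        my_fence_boost(mf);     /* e.g. bump a devfreq/clk target */
}

static const struct dma_fence_ops my_fence_ops = {
        .get_driver_name = my_fence_get_driver_name,
        .get_timeline_name = my_fence_get_timeline_name,
        .set_deadline = my_fence_set_deadline,
};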
/**
* dma_fence_describe - Dump fence description into seq_file
* @fence: the fence to describe
...
@@ -684,6 +684,28 @@ long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage,
}
EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
/**
* dma_resv_set_deadline - Set a deadline on a reservation object's fences
* @obj: the reservation object
* @usage: controls which fences to include, see enum dma_resv_usage.
* @deadline: the requested deadline (MONOTONIC)
*
* May be called without holding the dma_resv lock. Sets @deadline on
* all fences filtered by @usage.
*/
void dma_resv_set_deadline(struct dma_resv *obj, enum dma_resv_usage usage,
ktime_t deadline)
{
struct dma_resv_iter cursor;
struct dma_fence *fence;
dma_resv_iter_begin(&cursor, obj, usage);
dma_resv_for_each_fence_unlocked(&cursor, fence) {
dma_fence_set_deadline(fence, deadline);
}
dma_resv_iter_end(&cursor);
}
EXPORT_SYMBOL_GPL(dma_resv_set_deadline);
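As a usage illustration (not part of the patch; the wrapper below is hypothetical), a driver that is about to block on every fence attached to a buffer could hint them all first through the reservation object:

#include <linux/dma-resv.h>
#include <linux/sched.h>

/* Hypothetical helper: hint every fence needed for read access (which
 * includes writers) with @deadline, then do the normal blocking wait. */
static long example_wait_for_bo_idle(struct dma_resv *resv, ktime_t deadline)
{
        dma_resv_set_deadline(resv, DMA_RESV_USAGE_READ, deadline);

        return dma_resv_wait_timeout(resv, DMA_RESV_USAGE_READ, false,
                                     MAX_SCHEDULE_TIMEOUT);
}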
/**
* dma_resv_test_signaled - Test if a reservation object's fences have been
...
@@ -1511,6 +1511,41 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
/*
* For atomic updates which touch just a single CRTC, calculate the time of the
* next vblank, and inform all the fences of the deadline.
*/
static void set_fence_deadline(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state;
struct drm_plane *plane;
struct drm_plane_state *new_plane_state;
ktime_t vbltime = 0;
int i;
for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
ktime_t v;
if (drm_crtc_next_vblank_start(crtc, &v))
continue;
if (!vbltime || ktime_before(v, vbltime))
vbltime = v;
}
/* If no CRTCs updated, then nothing to do: */
if (!vbltime)
return;
for_each_new_plane_in_state (state, plane, new_plane_state, i) {
if (!new_plane_state->fence)
continue;
dma_fence_set_deadline(new_plane_state->fence, vbltime);
}
}
/**
* drm_atomic_helper_wait_for_fences - wait for fences stashed in plane state
* @dev: DRM device
@@ -1540,6 +1575,8 @@ int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
struct drm_plane_state *new_plane_state;
int i, ret;
set_fence_deadline(dev, state);
for_each_new_plane_in_state(state, plane, new_plane_state, i) {
if (!new_plane_state->fence)
continue;
...
@@ -844,10 +844,9 @@ bool drm_crtc_vblank_helper_get_vblank_timestamp(struct drm_crtc *crtc,
EXPORT_SYMBOL(drm_crtc_vblank_helper_get_vblank_timestamp);
/**
* drm_crtc_get_last_vbltimestamp - retrieve raw timestamp for the most
* recent vblank interval
* @crtc: CRTC whose vblank timestamp to retrieve
* @tvblank: Pointer to target time which should receive the timestamp
* @in_vblank_irq:
* True when called from drm_crtc_handle_vblank(). Some drivers
@@ -865,10 +864,9 @@ EXPORT_SYMBOL(drm_crtc_vblank_helper_get_vblank_timestamp);
* True if timestamp is considered to be very precise, false otherwise.
*/
static bool
drm_crtc_get_last_vbltimestamp(struct drm_crtc *crtc, ktime_t *tvblank,
bool in_vblank_irq)
{
bool ret = false;
/* Define requested maximum error on timestamps (nanoseconds). */
@@ -876,8 +874,6 @@ drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
/* Query driver if possible and precision timestamping enabled. */
if (crtc && crtc->funcs->get_vblank_timestamp && max_error > 0) {
ret = crtc->funcs->get_vblank_timestamp(crtc, &max_error,
tvblank, in_vblank_irq);
}
@@ -891,6 +887,15 @@ drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
return ret;
}
static bool
drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
ktime_t *tvblank, bool in_vblank_irq)
{
struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
return drm_crtc_get_last_vbltimestamp(crtc, tvblank, in_vblank_irq);
}
/**
* drm_crtc_vblank_count - retrieve "cooked" vblank counter value
* @crtc: which counter to retrieve
@@ -980,6 +985,36 @@ u64 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
}
EXPORT_SYMBOL(drm_crtc_vblank_count_and_time);
/**
* drm_crtc_next_vblank_start - calculate the time of the next vblank
* @crtc: the crtc for which to calculate next vblank time
* @vblanktime: pointer to time to receive the next vblank timestamp.
*
* Calculate the expected time of the start of the next vblank period,
* based on time of previous vblank and frame duration
*/
int drm_crtc_next_vblank_start(struct drm_crtc *crtc, ktime_t *vblanktime)
{
unsigned int pipe = drm_crtc_index(crtc);
struct drm_vblank_crtc *vblank = &crtc->dev->vblank[pipe];
struct drm_display_mode *mode = &vblank->hwmode;
u64 vblank_start;
if (!vblank->framedur_ns || !vblank->linedur_ns)
return -EINVAL;
if (!drm_crtc_get_last_vbltimestamp(crtc, vblanktime, false))
return -EINVAL;
vblank_start = DIV_ROUND_DOWN_ULL(
(u64)vblank->framedur_ns * mode->crtc_vblank_start,
mode->crtc_vtotal);
*vblanktime = ktime_add(*vblanktime, ns_to_ktime(vblank_start));
return 0;
}
EXPORT_SYMBOL(drm_crtc_next_vblank_start);
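To make the calculation above concrete with a hypothetical 1920x1080@60 mode: with crtc_vblank_start = 1080, crtc_vtotal = 1125 and framedur_ns ~= 16666667, the computed offset is 16666667 * 1080 / 1125 ~= 16000000 ns, so the next vblank is predicted to start roughly 16 ms after the most recent vblank timestamp returned by drm_crtc_get_last_vbltimestamp().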
static void send_vblank_event(struct drm_device *dev,
struct drm_pending_vblank_event *e,
u64 seq, ktime_t now)
...
@@ -123,6 +123,37 @@ static void drm_sched_fence_release_finished(struct dma_fence *f)
dma_fence_put(&fence->scheduled);
}
static void drm_sched_fence_set_deadline_finished(struct dma_fence *f,
ktime_t deadline)
{
struct drm_sched_fence *fence = to_drm_sched_fence(f);
struct dma_fence *parent;
unsigned long flags;
spin_lock_irqsave(&fence->lock, flags);
/* If we already have an earlier deadline, keep it: */
if (test_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT, &f->flags) &&
ktime_before(fence->deadline, deadline)) {
spin_unlock_irqrestore(&fence->lock, flags);
return;
}
fence->deadline = deadline;
set_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT, &f->flags);
spin_unlock_irqrestore(&fence->lock, flags);
/*
* smp_load_acquire() to ensure that if we are racing another
* thread calling drm_sched_fence_set_parent(), that we see
* the parent set before it calls test_bit(HAS_DEADLINE_BIT)
*/
parent = smp_load_acquire(&fence->parent);
if (parent)
dma_fence_set_deadline(parent, deadline);
}
static const struct dma_fence_ops drm_sched_fence_ops_scheduled = {
.get_driver_name = drm_sched_fence_get_driver_name,
.get_timeline_name = drm_sched_fence_get_timeline_name,
@@ -133,6 +164,7 @@ static const struct dma_fence_ops drm_sched_fence_ops_finished = {
.get_driver_name = drm_sched_fence_get_driver_name,
.get_timeline_name = drm_sched_fence_get_timeline_name,
.release = drm_sched_fence_release_finished,
.set_deadline = drm_sched_fence_set_deadline_finished,
};
struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f)
@@ -147,6 +179,20 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f)
}
EXPORT_SYMBOL(to_drm_sched_fence);
void drm_sched_fence_set_parent(struct drm_sched_fence *s_fence,
struct dma_fence *fence)
{
/*
* smp_store_release() to ensure another thread racing us
* in drm_sched_fence_set_deadline_finished() sees the
* fence's parent set before test_bit()
*/
smp_store_release(&s_fence->parent, dma_fence_get(fence));
if (test_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT,
&s_fence->finished.flags))
dma_fence_set_deadline(fence, s_fence->deadline);
}
struct drm_sched_fence *drm_sched_fence_alloc(struct drm_sched_entity *entity,
void *owner)
{
...
@@ -1048,7 +1048,7 @@ static int drm_sched_main(void *param)
drm_sched_fence_scheduled(s_fence);
if (!IS_ERR_OR_NULL(fence)) {
drm_sched_fence_set_parent(s_fence, fence);
/* Drop for original kref_init of the fence */
dma_fence_put(fence);
...
@@ -230,6 +230,7 @@ bool drm_dev_has_vblank(const struct drm_device *dev);
u64 drm_crtc_vblank_count(struct drm_crtc *crtc);
u64 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
ktime_t *vblanktime);
int drm_crtc_next_vblank_start(struct drm_crtc *crtc, ktime_t *vblanktime);
void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
struct drm_pending_vblank_event *e);
void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
...
@@ -41,6 +41,15 @@
*/
#define DRM_SCHED_FENCE_DONT_PIPELINE DMA_FENCE_FLAG_USER_BITS
/**
* DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT - A fence deadline hint has been set
*
* Because a deadline hint can be set before the backing hw fence is
* created, we need to keep track of whether a deadline has already
* been set.
*/
#define DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT (DMA_FENCE_FLAG_USER_BITS + 1)
enum dma_resv_usage;
struct dma_resv;
struct drm_gem_object;
@@ -282,6 +291,12 @@ struct drm_sched_fence {
*/
struct dma_fence finished;
/**
* @deadline: deadline set on &drm_sched_fence.finished which
* potentially needs to be propagated to &drm_sched_fence.parent
*/
ktime_t deadline;
/**
* @parent: the fence returned by &drm_sched_backend_ops.run_job
* when scheduling the job on hardware. We signal the
@@ -574,6 +589,8 @@ void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
enum drm_sched_priority priority);
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
void drm_sched_fence_set_parent(struct drm_sched_fence *s_fence,
struct dma_fence *fence);
struct drm_sched_fence *drm_sched_fence_alloc(
struct drm_sched_entity *s_entity, void *owner);
void drm_sched_fence_init(struct drm_sched_fence *fence,
...
@@ -257,6 +257,26 @@ struct dma_fence_ops {
*/
void (*timeline_value_str)(struct dma_fence *fence,
char *str, int size);
/**
* @set_deadline:
*
* Callback to allow a fence waiter to inform the fence signaler of
* an upcoming deadline, such as vblank, by which point the waiter
* would prefer the fence to be signaled. This is intended to
* give feedback to the fence signaler to aid in power management
* decisions, such as boosting GPU frequency.
*
* This is called without &dma_fence.lock held, it can be called
* multiple times and from any context. Locking is up to the callee
* if it has some state to manage. If multiple deadlines are set,
* the expectation is to track the soonest one. If the deadline is
* before the current time, it should be interpreted as an immediate
* deadline.
*
* This callback is optional.
*/
void (*set_deadline)(struct dma_fence *fence, ktime_t deadline);
};
void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
@@ -583,6 +603,8 @@ static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr)
return ret < 0 ? ret : 0;
}
void dma_fence_set_deadline(struct dma_fence *fence, ktime_t deadline);
struct dma_fence *dma_fence_get_stub(void);
struct dma_fence *dma_fence_allocate_private_stub(void);
u64 dma_fence_context_alloc(unsigned num);
...
@@ -479,6 +479,8 @@ int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage,
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage,
bool intr, unsigned long timeout);
void dma_resv_set_deadline(struct dma_resv *obj, enum dma_resv_usage usage,
ktime_t deadline);
bool dma_resv_test_signaled(struct dma_resv *obj, enum dma_resv_usage usage);
void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq);
...
@@ -16,12 +16,16 @@
#include <linux/types.h>
/**
* struct sync_merge_data - SYNC_IOC_MERGE: merge two fences
* @name: name of new fence
* @fd2: file descriptor of second fence
* @fence: returns the fd of the new fence to userspace
* @flags: merge_data flags
* @pad: padding for 64-bit alignment, should always be zero
*
* Creates a new fence containing copies of the sync_pts in both
* the calling fd and sync_merge_data.fd2. Returns the new fence's
* fd in sync_merge_data.fence
*/ */
struct sync_merge_data {
char name[32];
@@ -34,8 +38,8 @@ struct sync_merge_data {
/**
* struct sync_fence_info - detailed fence information
* @obj_name: name of parent sync_timeline
* @driver_name: name of driver implementing the parent
* @status: status of the fence 0:active 1:signaled <0:error
* @flags: fence_info flags
* @timestamp_ns: timestamp of status change in nanoseconds
*/
@@ -48,14 +52,19 @@ struct sync_fence_info {
};
/**
* struct sync_file_info - SYNC_IOC_FILE_INFO: get detailed information on a sync_file
* @name: name of fence
* @status: status of fence. 1: signaled 0:active <0:error
* @flags: sync_file_info flags
* @num_fences: number of fences in the sync_file
* @pad: padding for 64-bit alignment, should always be zero
* @sync_fence_info: pointer to array of struct &sync_fence_info with all
* fences in the sync_file
*
* Takes a struct sync_file_info. If num_fences is 0, the field is updated
* with the actual number of fences. If num_fences is > 0, the system will
* use the pointer provided on sync_fence_info to return up to num_fences of
* struct sync_fence_info, with detailed fence information.
*/
struct sync_file_info {
char name[32];
@@ -69,30 +78,14 @@ struct sync_file_info {
#define SYNC_IOC_MAGIC '>'
/*
* Opcodes 0, 1 and 2 were burned during an API change to avoid users of the
* old API getting weird errors when trying to handle sync_files. The API
* change happened during the de-stage of the Sync Framework when there were
* no upstream users available.
*/
#define SYNC_IOC_MERGE _IOWR(SYNC_IOC_MAGIC, 3, struct sync_merge_data)
#define SYNC_IOC_FILE_INFO _IOWR(SYNC_IOC_MAGIC, 4, struct sync_file_info)
#endif /* _UAPI_LINUX_SYNC_H */
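For reference, the two-step SYNC_IOC_FILE_INFO pattern documented above looks roughly like this from userspace (a hypothetical sketch; error handling is abbreviated):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/sync_file.h>

static int dump_sync_file(int fd)
{
        struct sync_file_info info = {0};
        struct sync_fence_info *fences;

        /* First call with num_fences == 0: kernel fills in the fence count. */
        if (ioctl(fd, SYNC_IOC_FILE_INFO, &info) < 0)
                return -1;

        if (!info.num_fences) {
                printf("%s: status %d, no fences\n", info.name, info.status);
                return 0;
        }

        fences = calloc(info.num_fences, sizeof(*fences));
        if (!fences)
                return -1;
        info.sync_fence_info = (uint64_t)(uintptr_t)fences;

        /* Second call: kernel copies up to num_fences sync_fence_info entries. */
        if (ioctl(fd, SYNC_IOC_FILE_INFO, &info) < 0) {
                free(fences);
                return -1;
        }

        printf("%s: status %d, %u fences\n", info.name, info.status,
               info.num_fences);
        free(fences);
        return 0;
}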