Commit be13336e authored by Aravind Iddamsetty, committed by Rodrigo Vivi

drm/xe/pmu: Drop interrupt pmu event

Drop interrupt event from PMU as that is not useful and not being used
by any UMD.

Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Cc: Francois Dugast <francois.dugast@intel.com>
Signed-off-by: Aravind Iddamsetty <aravind.iddamsetty@linux.intel.com>
Reviewed-by: Francois Dugast <francois.dugast@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 60f3c7fc
......@@ -27,20 +27,6 @@
#define IIR(offset) XE_REG(offset + 0x8)
#define IER(offset) XE_REG(offset + 0xc)
/*
 * Interrupt statistic for PMU. Increments the counter only if the
 * interrupt originated from the GPU so interrupts from a device which
 * shares the interrupt line are not accounted.
 *
 * Lockless: callers are the device's IRQ handlers, so updates are
 * serialized per device; readers (the PMU event read path) may observe
 * the value concurrently without locking.
 */
static __always_inline void xe_pmu_irq_stats(struct xe_device *xe)
{
	/*
	 * A clever compiler translates that into INC. A not so clever one
	 * should at least prevent store tearing.
	 *
	 * WRITE_ONCE() is required here: irq_count is read concurrently
	 * (without a lock) by the PMU event read side, so the store must
	 * be a single, untorn access.
	 */
	WRITE_ONCE(xe->pmu.irq_count, xe->pmu.irq_count + 1);
}
static void assert_iir_is_zero(struct xe_gt *mmio, struct xe_reg reg)
{
u32 val = xe_mmio_read32(mmio, reg);
......@@ -360,8 +346,6 @@ static irqreturn_t xelp_irq_handler(int irq, void *arg)
xe_display_irq_enable(xe, gu_misc_iir);
xe_pmu_irq_stats(xe);
return IRQ_HANDLED;
}
......@@ -458,8 +442,6 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg)
dg1_intr_enable(xe, false);
xe_display_irq_enable(xe, gu_misc_iir);
xe_pmu_irq_stats(xe);
return IRQ_HANDLED;
}
......
......@@ -61,7 +61,7 @@ static u64 __engine_group_busyness_read(struct xe_gt *gt, int sample_type)
static u64 engine_group_busyness_read(struct xe_gt *gt, u64 config)
{
int sample_type = config_counter(config) - 1;
int sample_type = config_counter(config);
const unsigned int gt_id = gt->info.id;
struct xe_device *xe = gt->tile->xe;
struct xe_pmu *pmu = &xe->pmu;
......@@ -114,10 +114,6 @@ config_status(struct xe_device *xe, u64 config)
return -ENOENT;
switch (config_counter(config)) {
case XE_PMU_INTERRUPTS(0):
if (gt_id)
return -ENOENT;
break;
case XE_PMU_RENDER_GROUP_BUSY(0):
case XE_PMU_COPY_GROUP_BUSY(0):
case XE_PMU_ANY_ENGINE_GROUP_BUSY(0):
......@@ -181,13 +177,9 @@ static u64 __xe_pmu_event_read(struct perf_event *event)
const unsigned int gt_id = config_gt_id(event->attr.config);
const u64 config = event->attr.config;
struct xe_gt *gt = xe_device_get_gt(xe, gt_id);
struct xe_pmu *pmu = &xe->pmu;
u64 val;
switch (config_counter(config)) {
case XE_PMU_INTERRUPTS(0):
val = READ_ONCE(pmu->irq_count);
break;
case XE_PMU_RENDER_GROUP_BUSY(0):
case XE_PMU_COPY_GROUP_BUSY(0):
case XE_PMU_ANY_ENGINE_GROUP_BUSY(0):
......@@ -361,11 +353,10 @@ create_event_attributes(struct xe_pmu *pmu)
const char *unit;
bool global;
} events[] = {
__global_event(0, "interrupts", NULL),
__event(1, "render-group-busy", "ns"),
__event(2, "copy-group-busy", "ns"),
__event(3, "media-group-busy", "ns"),
__event(4, "any-engine-group-busy", "ns"),
__event(0, "render-group-busy", "ns"),
__event(1, "copy-group-busy", "ns"),
__event(2, "media-group-busy", "ns"),
__event(3, "any-engine-group-busy", "ns"),
};
struct perf_pmu_events_attr *pmu_attr = NULL, *pmu_iter;
......
......@@ -51,14 +51,6 @@ struct xe_pmu {
*
*/
u64 sample[XE_PMU_MAX_GT][__XE_NUM_PMU_SAMPLERS];
/**
* @irq_count: Number of interrupts
*
* Intentionally unsigned long to avoid atomics or heuristics on 32bit.
* 4e9 interrupts are a lot and postprocessing can really deal with an
* occasional wraparound easily. It's 32bit after all.
*/
unsigned long irq_count;
/**
* @events_attr_group: Device events attribute group.
*/
......
......@@ -977,7 +977,7 @@ struct drm_xe_wait_user_fence {
* in 'struct perf_event_attr' as part of perf_event_open syscall to read a
* particular event.
*
* For example to open the XE_PMU_INTERRUPTS(0):
* For example to open the XE_PMU_RENDER_GROUP_BUSY(0):
*
* .. code-block:: C
*
......@@ -991,7 +991,7 @@ struct drm_xe_wait_user_fence {
* attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED;
* attr.use_clockid = 1;
* attr.clockid = CLOCK_MONOTONIC;
* attr.config = XE_PMU_INTERRUPTS(0);
* attr.config = XE_PMU_RENDER_GROUP_BUSY(0);
*
* fd = syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
*/
......@@ -1004,11 +1004,10 @@ struct drm_xe_wait_user_fence {
#define ___XE_PMU_OTHER(gt, x) \
(((__u64)(x)) | ((__u64)(gt) << __XE_PMU_GT_SHIFT))
#define XE_PMU_INTERRUPTS(gt) ___XE_PMU_OTHER(gt, 0)
#define XE_PMU_RENDER_GROUP_BUSY(gt) ___XE_PMU_OTHER(gt, 1)
#define XE_PMU_COPY_GROUP_BUSY(gt) ___XE_PMU_OTHER(gt, 2)
#define XE_PMU_MEDIA_GROUP_BUSY(gt) ___XE_PMU_OTHER(gt, 3)
#define XE_PMU_ANY_ENGINE_GROUP_BUSY(gt) ___XE_PMU_OTHER(gt, 4)
#define XE_PMU_RENDER_GROUP_BUSY(gt) ___XE_PMU_OTHER(gt, 0)
#define XE_PMU_COPY_GROUP_BUSY(gt) ___XE_PMU_OTHER(gt, 1)
#define XE_PMU_MEDIA_GROUP_BUSY(gt) ___XE_PMU_OTHER(gt, 2)
#define XE_PMU_ANY_ENGINE_GROUP_BUSY(gt) ___XE_PMU_OTHER(gt, 3)
#if defined(__cplusplus)
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment