Commit 1d81d15d authored by James Morse, committed by Borislav Petkov

x86/resctrl: Move mbm_overflow_count() into resctrl_arch_rmid_read()

resctrl_arch_rmid_read() is intended as the function that an
architecture agnostic resctrl filesystem driver can use to
read a value in bytes from a counter. Currently the function returns
the MBM values in chunks directly from hardware. When reading a bandwidth
counter, mbm_overflow_count() must be used to correct for any possible
overflow.
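
For illustration, the correction that mbm_overflow_count() applies can be seen
in the following minimal user-space sketch; the 24-bit width and the two
sample values are arbitrary examples chosen for this sketch, not values any
particular CPU reports:

#include <stdint.h>
#include <stdio.h>

/*
 * Same arithmetic as mbm_overflow_count(): shift both samples up so the
 * counter's most significant bit lands in bit 63, subtract, then shift
 * back down.  The modular wrap of the unsigned subtraction yields the
 * correct delta even when the hardware counter overflowed between reads.
 */
static uint64_t overflow_count(uint64_t prev, uint64_t cur, unsigned int width)
{
	uint64_t shift = 64 - width;

	return ((cur << shift) - (prev << shift)) >> shift;
}

int main(void)
{
	unsigned int width = 24;	/* example counter width */
	uint64_t prev = 0xfffff0;	/* sample taken just before the wrap */
	uint64_t cur = 0x10;		/* sample taken just after the wrap */

	/* Prints 32: 0x10 counts before the wrap plus 0x10 after it. */
	printf("%llu\n", (unsigned long long)overflow_count(prev, cur, width));
	return 0;
}

A plain cur - prev would instead produce a huge bogus value whenever the
counter wrapped between the two reads.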

mbm_overflow_count() is architecture specific; its behaviour should
be part of resctrl_arch_rmid_read().

Move the mbm_overflow_count() calls into resctrl_arch_rmid_read().
This allows the resctrl filesystem's prev_msr to be removed in
favour of the architecture-private version.
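
The resulting split can be pictured with a simplified user-space sketch
(abbreviated names and hypothetical *_sketch helpers; not the kernel code
itself): the architecture side keeps the previous raw value in its private
per-RMID state and hands back overflow-corrected chunks, so the filesystem
side only accumulates:

#include <stdint.h>
#include <stdio.h>

/* Architecture-private per-RMID state: the previous raw counter value
 * lives here rather than in the filesystem's struct mbm_state. */
struct arch_mbm_state_sketch {
	uint64_t prev_msr;
};

/* Arch-side read: returns overflow-corrected chunks since the last call. */
static uint64_t arch_rmid_read_sketch(struct arch_mbm_state_sketch *am,
				      uint64_t msr_val, unsigned int width)
{
	uint64_t shift = 64 - width;
	uint64_t chunks = ((msr_val << shift) - (am->prev_msr << shift)) >> shift;

	am->prev_msr = msr_val;
	return chunks;
}

/* Filesystem-side state: no prev_msr any more, it only accumulates. */
struct mbm_state_sketch {
	uint64_t chunks;
};

int main(void)
{
	struct arch_mbm_state_sketch am = { .prev_msr = 0xfffff0 };
	struct mbm_state_sketch m = { .chunks = 0 };

	/* Filesystem side: add whatever the arch read hands back. */
	m.chunks += arch_rmid_read_sketch(&am, 0x10, 24);

	printf("%llu\n", (unsigned long long)m.chunks);	/* 32 */
	return 0;
}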
Signed-off-by: James Morse <james.morse@arm.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Jamie Iles <quic_jiles@quicinc.com>
Reviewed-by: Shaopeng Tan <tan.shaopeng@fujitsu.com>
Reviewed-by: Reinette Chatre <reinette.chatre@intel.com>
Tested-by: Xin Hao <xhao@linux.alibaba.com>
Tested-by: Shaopeng Tan <tan.shaopeng@fujitsu.com>
Tested-by: Cristian Marussi <cristian.marussi@arm.com>
Link: https://lore.kernel.org/r/20220902154829.30399-18-james.morse@arm.com
parent 8286618a
@@ -281,7 +281,6 @@ struct rftype {
 /**
  * struct mbm_state - status for each MBM counter in each domain
  * @chunks:	Total data moved (multiply by rdt_group.mon_scale to get bytes)
- * @prev_msr:	Value of IA32_QM_CTR for this RMID last time we read it
  * @prev_bw_chunks: Previous chunks value read for bandwidth calculation
  * @prev_bw:	The most recent bandwidth in MBps
  * @delta_bw:	Difference between the current and previous bandwidth
@@ -289,7 +288,6 @@ struct rftype {
  */
 struct mbm_state {
 	u64	chunks;
-	u64	prev_msr;
 	u64	prev_bw_chunks;
 	u32	prev_bw;
 	u32	delta_bw;
@@ -167,9 +167,20 @@ void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_domain *d,
 		memset(am, 0, sizeof(*am));
 }
 
+static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr, unsigned int width)
+{
+	u64 shift = 64 - width, chunks;
+
+	chunks = (cur_msr << shift) - (prev_msr << shift);
+	return chunks >> shift;
+}
+
 int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d,
 			   u32 rmid, enum resctrl_event_id eventid, u64 *val)
 {
+	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
 	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
+	struct arch_mbm_state *am;
 	u64 msr_val;
 
 	if (!cpumask_test_cpu(smp_processor_id(), &d->cpu_mask))
@@ -191,7 +202,13 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d,
 	if (msr_val & RMID_VAL_UNAVAIL)
 		return -EINVAL;
 
-	*val = msr_val;
+	am = get_arch_mbm_state(hw_dom, rmid, eventid);
+	if (am) {
+		*val = mbm_overflow_count(am->prev_msr, msr_val, hw_res->mbm_width);
+		am->prev_msr = msr_val;
+	} else {
+		*val = msr_val;
+	}
 
 	return 0;
 }
@@ -322,19 +339,10 @@ void free_rmid(u32 rmid)
 	list_add_tail(&entry->list, &rmid_free_lru);
 }
 
-static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr, unsigned int width)
-{
-	u64 shift = 64 - width, chunks;
-
-	chunks = (cur_msr << shift) - (prev_msr << shift);
-	return chunks >> shift;
-}
-
 static int __mon_event_count(u32 rmid, struct rmid_read *rr)
 {
-	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(rr->r);
 	struct mbm_state *m;
-	u64 chunks, tval = 0;
+	u64 tval = 0;
 
 	if (rr->first)
 		resctrl_arch_reset_rmid(rr->r, rr->d, rmid, rr->evtid);
@@ -363,13 +371,10 @@ static int __mon_event_count(u32 rmid, struct rmid_read *rr)
 	if (rr->first) {
 		memset(m, 0, sizeof(struct mbm_state));
-		m->prev_msr = tval;
 		return 0;
 	}
 
-	chunks = mbm_overflow_count(m->prev_msr, tval, hw_res->mbm_width);
-	m->chunks += chunks;
-	m->prev_msr = tval;
+	m->chunks += tval;
 
 	rr->val += get_corrected_mbm_count(rmid, m->chunks);