Commit d89b7379 authored by Vikas Shivappa, committed by Thomas Gleixner

x86/intel_rdt/cqm: Add mon_data

Add a mon_data directory for the root rdtgroup and all other rdtgroups.
The directory holds all of the monitored data for all domains and events
of all resources being monitored.

The mon_data directory itself contains a set of subdirectories named in the
format mon_<domain_name>_<domain_id>. Each of these subdirectories contains
one file per event, with mode 0444 (read-only). Reading a file displays a
snapshot of the monitored data for the event the file represents.

For example, on a 2-socket Broadwell with llc_occupancy being
monitored, the mon_data contents look as below:

$ ls /sys/fs/resctrl/p1/mon_data/
mon_L3_00
mon_L3_01

Each domain directory has one file per event:
$ ls /sys/fs/resctrl/p1/mon_data/mon_L3_00/
llc_occupancy

To read the current llc_occupancy of ctrl_mon group p1:
$ cat /sys/fs/resctrl/p1/mon_data/mon_L3_00/llc_occupancy
33789096
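
Purely as an illustration (not part of this patch), a minimal userspace sketch
that walks a group's mon_data tree and prints every event of every domain; the
group name p1 and the mount point are taken from the example above:

/* Hypothetical example: dump all monitoring events of group "p1". */
#include <dirent.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *base = "/sys/fs/resctrl/p1/mon_data";
	char path[512], buf[64];
	struct dirent *dom, *evt;
	DIR *dd, *ed;
	FILE *f;

	dd = opendir(base);
	if (!dd)
		return 1;

	while ((dom = readdir(dd))) {
		/* domain directories are named mon_<resource>_<id> */
		if (strncmp(dom->d_name, "mon_", 4))
			continue;
		snprintf(path, sizeof(path), "%s/%s", base, dom->d_name);
		ed = opendir(path);
		if (!ed)
			continue;
		while ((evt = readdir(ed))) {
			if (evt->d_name[0] == '.')
				continue;
			snprintf(path, sizeof(path), "%s/%s/%s", base,
				 dom->d_name, evt->d_name);
			f = fopen(path, "r");
			if (f && fgets(buf, sizeof(buf), f))
				/* e.g. "mon_L3_00/llc_occupancy: 33789096" */
				printf("%s/%s: %s", dom->d_name,
				       evt->d_name, buf);
			if (f)
				fclose(f);
		}
		closedir(ed);
	}
	closedir(dd);
	return 0;
}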

[This patch idea is based on Tony's sample patches to organise data in a
per domain directory and have one file per event (and use the fp->priv to
store mon data bits)]
Signed-off-by: Vikas Shivappa <vikas.shivappa@linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: ravi.v.shankar@intel.com
Cc: tony.luck@intel.com
Cc: fenghua.yu@intel.com
Cc: peterz@infradead.org
Cc: eranian@google.com
Cc: vikas.shivappa@intel.com
Cc: ak@linux.intel.com
Cc: davidcc@google.com
Cc: reinette.chatre@intel.com
Link: http://lkml.kernel.org/r/1501017287-28083-20-git-send-email-vikas.shivappa@linux.intel.com
parent 90c403e8
@@ -71,6 +71,7 @@ cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r);
 struct rdt_resource rdt_resources_all[] = {
 	[RDT_RESOURCE_L3] =
 	{
+		.rid		= RDT_RESOURCE_L3,
 		.name		= "L3",
 		.domains	= domain_init(RDT_RESOURCE_L3),
 		.msr_base	= IA32_L3_CBM_BASE,
@@ -87,6 +88,7 @@ struct rdt_resource rdt_resources_all[] = {
 	},
 	[RDT_RESOURCE_L3DATA] =
 	{
+		.rid		= RDT_RESOURCE_L3DATA,
 		.name		= "L3DATA",
 		.domains	= domain_init(RDT_RESOURCE_L3DATA),
 		.msr_base	= IA32_L3_CBM_BASE,
@@ -103,6 +105,7 @@ struct rdt_resource rdt_resources_all[] = {
 	},
 	[RDT_RESOURCE_L3CODE] =
 	{
+		.rid		= RDT_RESOURCE_L3CODE,
 		.name		= "L3CODE",
 		.domains	= domain_init(RDT_RESOURCE_L3CODE),
 		.msr_base	= IA32_L3_CBM_BASE,
@@ -119,6 +122,7 @@ struct rdt_resource rdt_resources_all[] = {
 	},
 	[RDT_RESOURCE_L2] =
 	{
+		.rid		= RDT_RESOURCE_L2,
 		.name		= "L2",
 		.domains	= domain_init(RDT_RESOURCE_L2),
 		.msr_base	= IA32_L2_CBM_BASE,
@@ -135,6 +139,7 @@ struct rdt_resource rdt_resources_all[] = {
 	},
 	[RDT_RESOURCE_MBA] =
 	{
+		.rid		= RDT_RESOURCE_MBA,
 		.name		= "MB",
 		.domains	= domain_init(RDT_RESOURCE_MBA),
 		.msr_base	= IA32_MBA_THRTL_BASE,
@@ -362,7 +367,7 @@ void rdt_ctrl_update(void *arg)
  * caller, return the first domain whose id is bigger than the input id.
  * The domain list is sorted by id in ascending order.
  */
-static struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
+struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
 				   struct list_head **pos)
 {
 	struct rdt_domain *d;
@@ -33,6 +33,27 @@ struct mon_evt {
 	struct list_head list;
 };
 
+/**
+ * struct mon_data_bits - Monitoring details for each event file
+ * @rid:		Resource id associated with the event file.
+ * @evtid:		Event id associated with the event file
+ * @domid:		The domain to which the event file belongs
+ */
+union mon_data_bits {
+	void *priv;
+	struct {
+		unsigned int rid	: 10;
+		unsigned int evtid	: 8;
+		unsigned int domid	: 14;
+	} u;
+};
+
+struct rmid_read {
+	struct rdtgroup		*rgrp;
+	int			evtid;
+	u64			val;
+};
+
 extern unsigned int intel_cqm_threshold;
 extern bool rdt_alloc_capable;
 extern bool rdt_mon_capable;
@@ -46,11 +67,13 @@ enum rdt_group_type {
 /**
  * struct mongroup - store mon group's data in resctrl fs.
+ * @mon_data_kn		kernlfs node for the mon_data directory
  * @parent:		parent rdtgrp
  * @crdtgrp_list:	child rdtgroup node list
  * @rmid:		rmid for this rdtgroup
  */
 struct mongroup {
+	struct kernfs_node	*mon_data_kn;
 	struct rdtgroup		*parent;
 	struct list_head	crdtgrp_list;
 	u32			rmid;
@@ -209,6 +232,7 @@ static inline bool is_llc_occupancy_enabled(void)
 /**
  * struct rdt_resource - attributes of an RDT resource
+ * @rid:		The index of the resource
  * @alloc_enabled:	Is allocation enabled on this machine
  * @mon_enabled:	Is monitoring enabled for this feature
  * @alloc_capable:	Is allocation available on this machine
@@ -230,6 +254,7 @@ static inline bool is_llc_occupancy_enabled(void)
  * @fflags:		flags to choose base and info files
  */
 struct rdt_resource {
+	int			rid;
 	bool			alloc_enabled;
 	bool			mon_enabled;
 	bool			alloc_capable;
@@ -323,6 +348,8 @@ union cpuid_0x10_x_edx {
 void rdt_ctrl_update(void *arg);
 struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn);
 void rdtgroup_kn_unlock(struct kernfs_node *kn);
+struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
+				   struct list_head **pos);
 ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
 				char *buf, size_t nbytes, loff_t off);
 int rdtgroup_schemata_show(struct kernfs_open_file *of,
@@ -331,5 +358,7 @@ struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r);
 int alloc_rmid(void);
 void free_rmid(u32 rmid);
 int rdt_get_mon_l3_config(struct rdt_resource *r);
+void mon_event_count(void *info);
+int rdtgroup_mondata_show(struct seq_file *m, void *arg);
 
 #endif /* _ASM_X86_INTEL_RDT_H */
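
For illustration only (not part of the patch), a standalone sketch of how the
union above lets the three ids travel packed inside a single pointer-sized
value, the way the event files carry them in their kernfs priv field; the
field widths mirror the declaration and the values are made up:

#include <stdio.h>

/* Mirrors union mon_data_bits above: 10 + 8 + 14 = 32 bits, small enough
 * to be carried in a pointer-sized priv field. */
union mon_data_bits {
	void *priv;
	struct {
		unsigned int rid	: 10;
		unsigned int evtid	: 8;
		unsigned int domid	: 14;
	} u;
};

int main(void)
{
	union mon_data_bits md = { .priv = NULL };
	void *kn_priv;

	/* Pack, as mkdir_mondata_subdir()/mon_addfile() do at file creation. */
	md.u.rid   = 0;		/* resource index, r->rid (illustrative value) */
	md.u.domid = 1;		/* domain id, d->id (illustrative value) */
	md.u.evtid = 1;		/* event id, mevt->evtid (illustrative value) */
	kn_priv = md.priv;	/* what ends up stored as the file's priv */

	/* Unpack, as rdtgroup_mondata_show() does on every read. */
	md.priv = kn_priv;
	printf("rid=%u domid=%u evtid=%u\n", md.u.rid, md.u.domid, md.u.evtid);
	return 0;
}
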
@@ -269,7 +269,8 @@ int rdtgroup_schemata_show(struct kernfs_open_file *of,
 {
 	struct rdtgroup *rdtgrp;
 	struct rdt_resource *r;
-	int closid, ret = 0;
+	int ret = 0;
+	u32 closid;
 
 	rdtgrp = rdtgroup_kn_lock_live(of->kn);
 	if (rdtgrp) {
@@ -284,3 +285,55 @@ int rdtgroup_schemata_show(struct kernfs_open_file *of,
 	rdtgroup_kn_unlock(of->kn);
 	return ret;
 }
+
+void mon_event_read(struct rmid_read *rr, struct rdt_domain *d,
+		    struct rdtgroup *rdtgrp, int evtid)
+{
+	/*
+	 * setup the parameters to send to the IPI to read the data.
+	 */
+	rr->rgrp = rdtgrp;
+	rr->evtid = evtid;
+	rr->val = 0;
+
+	smp_call_function_any(&d->cpu_mask, mon_event_count, rr, 1);
+}
+
+int rdtgroup_mondata_show(struct seq_file *m, void *arg)
+{
+	struct kernfs_open_file *of = m->private;
+	u32 resid, evtid, domid;
+	struct rdtgroup *rdtgrp;
+	struct rdt_resource *r;
+	union mon_data_bits md;
+	struct rdt_domain *d;
+	struct rmid_read rr;
+	int ret = 0;
+
+	rdtgrp = rdtgroup_kn_lock_live(of->kn);
+
+	md.priv = of->kn->priv;
+	resid = md.u.rid;
+	domid = md.u.domid;
+	evtid = md.u.evtid;
+
+	r = &rdt_resources_all[resid];
+	d = rdt_find_domain(r, domid, NULL);
+	if (!d) {
+		ret = -ENOENT;
+		goto out;
+	}
+
+	mon_event_read(&rr, d, rdtgrp, evtid);
+
+	if (rr.val & RMID_VAL_ERROR)
+		seq_puts(m, "Error\n");
+	else if (rr.val & RMID_VAL_UNAVAIL)
+		seq_puts(m, "Unavailable\n");
+	else
+		seq_printf(m, "%llu\n", rr.val * r->mon_scale);
+
+out:
+	rdtgroup_kn_unlock(of->kn);
+	return ret;
+}
@@ -294,6 +294,55 @@ void free_rmid(u32 rmid)
 	list_add_tail(&entry->list, &rmid_free_lru);
 }
 
+static int __mon_event_count(u32 rmid, struct rmid_read *rr)
+{
+	u64 tval;
+
+	tval = __rmid_read(rmid, rr->evtid);
+	if (tval & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL)) {
+		rr->val = tval;
+		return -EINVAL;
+	}
+	switch (rr->evtid) {
+	case QOS_L3_OCCUP_EVENT_ID:
+		rr->val += tval;
+		return 0;
+	default:
+		/*
+		 * Code would never reach here because
+		 * an invalid event id would fail the __rmid_read.
+		 */
+		return -EINVAL;
+	}
+}
+
+/*
+ * This is called via IPI to read the CQM/MBM counters
+ * on a domain.
+ */
+void mon_event_count(void *info)
+{
+	struct rdtgroup *rdtgrp, *entry;
+	struct rmid_read *rr = info;
+	struct list_head *head;
+
+	rdtgrp = rr->rgrp;
+
+	if (__mon_event_count(rdtgrp->mon.rmid, rr))
+		return;
+
+	/*
+	 * For Ctrl groups read data from child monitor groups.
+	 */
+	head = &rdtgrp->mon.crdtgrp_list;
+
+	if (rdtgrp->type == RDTCTRL_GROUP) {
+		list_for_each_entry(entry, head, mon.crdtgrp_list) {
+			if (__mon_event_count(entry->mon.rmid, rr))
+				return;
+		}
+	}
+}
+
 static int dom_data_init(struct rdt_resource *r)
 {
 	struct rmid_entry *entry = NULL;
@@ -152,6 +152,11 @@ static struct kernfs_ops rdtgroup_kf_single_ops = {
 	.seq_show		= rdtgroup_seqfile_show,
 };
 
+static struct kernfs_ops kf_mondata_ops = {
+	.atomic_write_len	= PAGE_SIZE,
+	.seq_show		= rdtgroup_mondata_show,
+};
+
 static bool is_cpu_list(struct kernfs_open_file *of)
 {
 	struct rftype *rft = of->kn->priv;
@@ -1217,6 +1222,140 @@ static struct file_system_type rdt_fs_type = {
 	.kill_sb = rdt_kill_sb,
 };
 
+static int mon_addfile(struct kernfs_node *parent_kn, const char *name,
+		       void *priv)
+{
+	struct kernfs_node *kn;
+	int ret = 0;
+
+	kn = __kernfs_create_file(parent_kn, name, 0444, 0,
+				  &kf_mondata_ops, priv, NULL, NULL);
+	if (IS_ERR(kn))
+		return PTR_ERR(kn);
+
+	ret = rdtgroup_kn_set_ugid(kn);
+	if (ret) {
+		kernfs_remove(kn);
+		return ret;
+	}
+
+	return ret;
+}
+
+static int mkdir_mondata_subdir(struct kernfs_node *parent_kn,
+				struct rdt_domain *d,
+				struct rdt_resource *r, struct rdtgroup *prgrp)
+{
+	union mon_data_bits priv;
+	struct kernfs_node *kn;
+	struct mon_evt *mevt;
+	char name[32];
+	int ret;
+
+	sprintf(name, "mon_%s_%02d", r->name, d->id);
+	/* create the directory */
+	kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp);
+	if (IS_ERR(kn))
+		return PTR_ERR(kn);
+
+	/*
+	 * This extra ref will be put in kernfs_remove() and guarantees
+	 * that kn is always accessible.
+	 */
+	kernfs_get(kn);
+	ret = rdtgroup_kn_set_ugid(kn);
+	if (ret)
+		goto out_destroy;
+
+	if (WARN_ON(list_empty(&r->evt_list))) {
+		ret = -EPERM;
+		goto out_destroy;
+	}
+
+	priv.u.rid = r->rid;
+	priv.u.domid = d->id;
+	list_for_each_entry(mevt, &r->evt_list, list) {
+		priv.u.evtid = mevt->evtid;
+		ret = mon_addfile(kn, mevt->name, priv.priv);
+		if (ret)
+			goto out_destroy;
+	}
+	kernfs_activate(kn);
+	return 0;
+
+out_destroy:
+	kernfs_remove(kn);
+	return ret;
+}
+
+static int mkdir_mondata_subdir_alldom(struct kernfs_node *parent_kn,
+				       struct rdt_resource *r,
+				       struct rdtgroup *prgrp)
+{
+	struct rdt_domain *dom;
+	int ret;
+
+	list_for_each_entry(dom, &r->domains, list) {
+		ret = mkdir_mondata_subdir(parent_kn, dom, r, prgrp);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * This creates a directory mon_data which contains the monitored data.
+ *
+ * mon_data has one directory for each domain whic are named
+ * in the format mon_<domain_name>_<domain_id>. For ex: A mon_data
+ * with L3 domain looks as below:
+ * ./mon_data:
+ *	mon_L3_00
+ *	mon_L3_01
+ *	mon_L3_02
+ *	...
+ *
+ * Each domain directory has one file per event:
+ * ./mon_L3_00/:
+ *	llc_occupancy
+ *
+ */
+static int mkdir_mondata_all(struct kernfs_node *parent_kn,
+			     struct rdtgroup *prgrp,
+			     struct kernfs_node **dest_kn)
+{
+	struct rdt_resource *r;
+	struct kernfs_node *kn;
+	int ret;
+
+	/*
+	 * Create the mon_data directory first.
+	 */
+	ret = mongroup_create_dir(parent_kn, NULL, "mon_data", &kn);
+	if (ret)
+		return ret;
+
+	if (dest_kn)
+		*dest_kn = kn;
+
+	/*
+	 * Create the subdirectories for each domain. Note that all events
+	 * in a domain like L3 are grouped into a resource whose domain is L3
+	 */
+	for_each_mon_enabled_rdt_resource(r) {
+		ret = mkdir_mondata_subdir_alldom(kn, r, prgrp);
+		if (ret)
+			goto out_destroy;
+	}
+
+	return 0;
+
+out_destroy:
+	kernfs_remove(kn);
+	return ret;
+}
+
 static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
 			     struct kernfs_node *prgrp_kn,
 			     const char *name, umode_t mode,
@@ -1275,6 +1414,10 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
 		if (ret < 0)
 			goto out_destroy;
 		rdtgrp->mon.rmid = ret;
+
+		ret = mkdir_mondata_all(kn, rdtgrp, &rdtgrp->mon.mon_data_kn);
+		if (ret)
+			goto out_idfree;
 	}
 	kernfs_activate(kn);
 
@@ -1283,6 +1426,8 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
 	 */
 	return 0;
 
+out_idfree:
+	free_rmid(rdtgrp->mon.rmid);
 out_destroy:
 	kernfs_remove(rdtgrp->kn);
 out_free_rgrp: