Commit 158c998e authored by Ashwin Chaugule, committed by Rafael J. Wysocki

ACPI / CPPC: add sysfs support to compute delivered performance

The CPPC tables contain entries for per-CPU feedback counters which
allow us to compute the delivered performance over a given interval
of time.

The math for delivered performance per the CPPCv5.0+ spec is:
  reference perf * delta(delivered perf ctr)/delta(ref perf ctr)
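
For example (with made-up numbers): if the reference performance is 100
and, over the sampling interval, the delivered counter advanced by
2,000,000 while the reference counter advanced by 1,000,000, the
delivered performance is 100 * 2000000 / 1000000 = 200.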

Maintaining deltas of the counters in the kernel is messy, as it
depends on when the reads are triggered (e.g. via the cpufreq
->get() interface). Also, the ->get() interface only returns one
value, so it can't return the raw counter values. So instead, leave
it to userspace to keep track of the raw values and do the math for
the CPUs it cares about.
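
As a rough illustration, a userspace consumer could do that math along
the lines below (a hypothetical helper, not part of this patch; counter
wraparound handling via the wraparound_time attribute is left out):

  #include <stdint.h>

  /*
   * Hypothetical userspace helper: given two raw samples of the
   * delivered/reference counters plus the reference performance value,
   * apply the formula above. Wraparound handling is omitted for brevity.
   */
  static uint64_t delivered_perf(uint64_t ref_perf,
                                 uint64_t del0, uint64_t del1,
                                 uint64_t ref0, uint64_t ref1)
  {
          uint64_t d_del = del1 - del0;   /* delta(delivered perf ctr) */
          uint64_t d_ref = ref1 - ref0;   /* delta(ref perf ctr) */

          if (!d_ref)
                  return 0;               /* avoid divide-by-zero */

          return ref_perf * d_del / d_ref;
  }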

The delivered and reference perf counters are exposed via the same
sysfs file to avoid the potential "skid" that would occur if the two
values were read individually from userspace.
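
A minimal sketch of reading those values from userspace (assuming the
attributes land under /sys/devices/system/cpu/cpuN/acpi_cppc/, where
this patch registers the kobject, and the "ref:... del:..." format used
by feedback_ctrs below):

  #include <stdio.h>

  /* Hypothetical reader; minimal error handling. */
  static int read_feedback_ctrs(int cpu, unsigned long long *ref,
                                unsigned long long *del)
  {
          char path[128];
          FILE *f;
          int n;

          snprintf(path, sizeof(path),
                   "/sys/devices/system/cpu/cpu%d/acpi_cppc/feedback_ctrs",
                   cpu);
          f = fopen(path, "r");
          if (!f)
                  return -1;
          n = fscanf(f, "ref:%llu del:%llu", ref, del);
          fclose(f);
          return (n == 2) ? 0 : -1;
  }

Sampling feedback_ctrs twice and feeding the two deltas, together with
reference_perf, into the formula above yields the delivered performance
over that interval.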
Signed-off-by: Prashanth Prakash <pprakash@codeaurora.org>
Signed-off-by: Ashwin Chaugule <ashwin.chaugule@linaro.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
parent be8b88d7
@@ -95,6 +95,17 @@ static unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;
(cpc)->cpc_entry.reg.space_id == \
ACPI_ADR_SPACE_PLATFORM_COMM)
/* Evaluates to True if reg is a NULL register descriptor */
#define IS_NULL_REG(reg) ((reg)->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY && \
(reg)->address == 0 && \
(reg)->bit_width == 0 && \
(reg)->bit_offset == 0 && \
(reg)->access_width == 0)
/* Evaluates to True if an optional cpc field is supported */
#define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ? \
!!(cpc)->cpc_entry.int_value : \
!IS_NULL_REG(&(cpc)->cpc_entry.reg))
/*
 * Arbitrary Retries in case the remote processor is slow to respond
 * to PCC commands. Keeping it high enough to cover emulators where

@@ -102,6 +113,71 @@ static unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;
 */
#define NUM_RETRIES 500

struct cppc_attr {
struct attribute attr;
ssize_t (*show)(struct kobject *kobj,
struct attribute *attr, char *buf);
ssize_t (*store)(struct kobject *kobj,
struct attribute *attr, const char *c, ssize_t count);
};
#define define_one_cppc_ro(_name) \
static struct cppc_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)
#define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj)
static ssize_t show_feedback_ctrs(struct kobject *kobj,
struct attribute *attr, char *buf)
{
struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
struct cppc_perf_fb_ctrs fb_ctrs = {0};
cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);
return scnprintf(buf, PAGE_SIZE, "ref:%llu del:%llu\n",
fb_ctrs.reference, fb_ctrs.delivered);
}
define_one_cppc_ro(feedback_ctrs);
static ssize_t show_reference_perf(struct kobject *kobj,
struct attribute *attr, char *buf)
{
struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
struct cppc_perf_fb_ctrs fb_ctrs = {0};
cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);
return scnprintf(buf, PAGE_SIZE, "%llu\n",
fb_ctrs.reference_perf);
}
define_one_cppc_ro(reference_perf);
static ssize_t show_wraparound_time(struct kobject *kobj,
struct attribute *attr, char *buf)
{
struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
struct cppc_perf_fb_ctrs fb_ctrs = {0};
cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);
return scnprintf(buf, PAGE_SIZE, "%llu\n", fb_ctrs.ctr_wrap_time);
}
define_one_cppc_ro(wraparound_time);
static struct attribute *cppc_attrs[] = {
&feedback_ctrs.attr,
&reference_perf.attr,
&wraparound_time.attr,
NULL
};
static struct kobj_type cppc_ktype = {
.sysfs_ops = &kobj_sysfs_ops,
.default_attrs = cppc_attrs,
};
static int check_pcc_chan(void)
{
int ret = -EIO;

@@ -555,6 +631,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
union acpi_object *out_obj, *cpc_obj;
struct cpc_desc *cpc_ptr;
struct cpc_reg *gas_t;
struct device *cpu_dev;
acpi_handle handle = pr->handle;
unsigned int num_ent, i, cpc_rev;
acpi_status status;

@@ -678,6 +755,16 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
/* Everything looks okay */
pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);
/* Add per logical CPU nodes for reading its feedback counters. */
cpu_dev = get_cpu_device(pr->id);
if (!cpu_dev)
goto out_free;
ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
"acpi_cppc");
if (ret)
goto out_free;
kfree(output.pointer);
return 0;

@@ -708,6 +795,7 @@ void acpi_cppc_processor_exit(struct acpi_processor *pr)
struct cpc_desc *cpc_ptr;
unsigned int i;
void __iomem *addr;

cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);

/* Free all the mapped sys mem areas for this CPU */

@@ -717,6 +805,7 @@ void acpi_cppc_processor_exit(struct acpi_processor *pr)
iounmap(addr);
}
kobject_put(&cpc_ptr->kobj);
kfree(cpc_ptr);
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);

@@ -818,7 +907,7 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
struct cpc_register_resource *highest_reg, *lowest_reg, *ref_perf,
*nom_perf;
- u64 high, low, ref, nom;
+ u64 high, low, nom;
int ret = 0, regs_in_pcc = 0;
if (!cpc_desc) {

@@ -849,15 +938,9 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
cpc_read(lowest_reg, &low);
perf_caps->lowest_perf = low;
- cpc_read(ref_perf, &ref);
- perf_caps->reference_perf = ref;
cpc_read(nom_perf, &nom);
perf_caps->nominal_perf = nom;
- if (!ref)
- perf_caps->reference_perf = perf_caps->nominal_perf;
if (!high || !low || !nom)
ret = -EFAULT;

@@ -878,8 +961,9 @@ EXPORT_SYMBOL_GPL(cppc_get_perf_caps);
int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
{
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
- struct cpc_register_resource *delivered_reg, *reference_reg;
- u64 delivered, reference;
+ struct cpc_register_resource *delivered_reg, *reference_reg,
+ *ref_perf_reg, *ctr_wrap_reg;
+ u64 delivered, reference, ref_perf, ctr_wrap_time;
int ret = 0, regs_in_pcc = 0;
if (!cpc_desc) {

@@ -889,9 +973,19 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME];
/*
* If the reference perf register is not supported then we should
* use the nominal perf value
*/
if (!CPC_SUPPORTED(ref_perf_reg))
ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
/* Are any of the regs PCC ?*/
- if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg)) {
+ if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) ||
+ CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) {
down_write(&pcc_lock);
regs_in_pcc = 1;
/* Ring doorbell once to update PCC subspace */

@@ -903,21 +997,26 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
cpc_read(delivered_reg, &delivered);
cpc_read(reference_reg, &reference);
cpc_read(ref_perf_reg, &ref_perf);
/*
* Per spec, if the optional ctr_wrap_time register is unsupported, then the
* performance counters are assumed to never wrap during the lifetime of
* the platform.
*/
ctr_wrap_time = (u64)(~((u64)0));
if (CPC_SUPPORTED(ctr_wrap_reg))
cpc_read(ctr_wrap_reg, &ctr_wrap_time);
- if (!delivered || !reference) {
+ if (!delivered || !reference || !ref_perf) {
ret = -EFAULT;
goto out_err;
}
perf_fb_ctrs->delivered = delivered;
perf_fb_ctrs->reference = reference;
perf_fb_ctrs->reference_perf = ref_perf;
- perf_fb_ctrs->delivered -= perf_fb_ctrs->prev_delivered;
- perf_fb_ctrs->reference -= perf_fb_ctrs->prev_reference;
- perf_fb_ctrs->prev_delivered = delivered;
- perf_fb_ctrs->prev_reference = reference;
+ perf_fb_ctrs->ctr_wrap_time = ctr_wrap_time;
out_err:
if (regs_in_pcc)
up_write(&pcc_lock);
...

@@ -65,6 +65,7 @@ struct cpc_desc {
int write_cmd_id;
struct cpc_register_resource cpc_regs[MAX_CPC_REG_ENT];
struct acpi_psd_package domain_info;
struct kobject kobj;
};

/* These are indexes into the per-cpu cpc_regs[]. Order is important. */

@@ -99,7 +100,6 @@ enum cppc_regs {
struct cppc_perf_caps {
u32 highest_perf;
u32 nominal_perf;
- u32 reference_perf;
u32 lowest_perf;
};

@@ -111,9 +111,9 @@ struct cppc_perf_ctrls {
struct cppc_perf_fb_ctrs {
u64 reference;
- u64 prev_reference;
u64 delivered;
- u64 prev_delivered;
+ u64 reference_perf;
u64 ctr_wrap_time;
};

/* Per CPU container for runtime CPPC management. */
...