Commit 1b5c0b75 authored by Vikas Shivappa, committed by Thomas Gleixner

x86/intel_rdt: Cleanup namespace to support RDT monitoring

A few of the data structures have generic names although they are RDT
allocation specific. Rename them to be allocation specific to
accommodate RDT monitoring, e.g. s/enabled/alloc_enabled/.

No functional change.
Signed-off-by: Vikas Shivappa <vikas.shivappa@linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: ravi.v.shankar@intel.com
Cc: tony.luck@intel.com
Cc: fenghua.yu@intel.com
Cc: peterz@infradead.org
Cc: eranian@google.com
Cc: vikas.shivappa@intel.com
Cc: ak@linux.intel.com
Cc: davidcc@google.com
Cc: reinette.chatre@intel.com
Link: http://lkml.kernel.org/r/1501017287-28083-7-git-send-email-vikas.shivappa@linux.intel.com
parent cb2200e9
@@ -27,7 +27,7 @@ struct intel_pqr_state {
 DECLARE_PER_CPU(struct intel_pqr_state, pqr_state);
 DECLARE_PER_CPU_READ_MOSTLY(int, cpu_closid);
-DECLARE_STATIC_KEY_FALSE(rdt_enable_key);
+DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
 /*
  * intel_rdt_sched_in() - Writes the task's CLOSid to IA32_PQR_MSR
@@ -44,7 +44,7 @@ DECLARE_STATIC_KEY_FALSE(rdt_enable_key);
  */
 static inline void intel_rdt_sched_in(void)
 {
-        if (static_branch_likely(&rdt_enable_key)) {
+        if (static_branch_likely(&rdt_alloc_enable_key)) {
                 struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
                 int closid;
...
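The rdt_alloc_enable_key used in intel_rdt_sched_in() above is an ordinary jump-label static key: it starts off, the scheduling fast path tests it with static_branch_likely(), and the resctrl mount/unmount paths flip it with static_branch_enable()/static_branch_disable() (see the rdtgroup hunks further down). The following is a minimal out-of-tree module sketch of that pattern, with illustrative names (demo_alloc_enable_key, demo_sched_in), not the kernel's actual identifiers from this patch:

#include <linux/module.h>
#include <linux/init.h>
#include <linux/printk.h>
#include <linux/jump_label.h>

/* Illustrative stand-in for rdt_alloc_enable_key: starts disabled. */
static DEFINE_STATIC_KEY_FALSE(demo_alloc_enable_key);

/* Fast path: the test compiles to a patched jump/no-op, not a load+branch. */
static void demo_sched_in(void)
{
        if (static_branch_likely(&demo_alloc_enable_key))
                pr_info("would program the allocation MSR here\n");
}

static int __init demo_init(void)
{
        demo_sched_in();                                /* key off: body skipped */
        static_branch_enable(&demo_alloc_enable_key);   /* e.g. at mount time */
        demo_sched_in();                                /* key on: body runs */
        return 0;
}

static void __exit demo_exit(void)
{
        static_branch_disable(&demo_alloc_enable_key);  /* e.g. at unmount */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");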
@@ -173,8 +173,8 @@ static inline bool cache_alloc_hsw_probe(void)
         r->default_ctrl = max_cbm;
         r->cache.cbm_len = 20;
         r->cache.min_cbm_bits = 2;
-        r->capable = true;
-        r->enabled = true;
+        r->alloc_capable = true;
+        r->alloc_enabled = true;
         return true;
 }
@@ -224,8 +224,8 @@ static bool rdt_get_mem_config(struct rdt_resource *r)
         r->data_width = 3;
         rdt_get_mba_infofile(r);
-        r->capable = true;
-        r->enabled = true;
+        r->alloc_capable = true;
+        r->alloc_enabled = true;
         return true;
 }
@@ -242,8 +242,8 @@ static void rdt_get_cache_config(int idx, struct rdt_resource *r)
         r->default_ctrl = BIT_MASK(eax.split.cbm_len + 1) - 1;
         r->data_width = (r->cache.cbm_len + 3) / 4;
         rdt_get_cache_infofile(r);
-        r->capable = true;
-        r->enabled = true;
+        r->alloc_capable = true;
+        r->alloc_enabled = true;
 }
 static void rdt_get_cdp_l3_config(int type)
@@ -255,12 +255,12 @@ static void rdt_get_cdp_l3_config(int type)
         r->cache.cbm_len = r_l3->cache.cbm_len;
         r->default_ctrl = r_l3->default_ctrl;
         r->data_width = (r->cache.cbm_len + 3) / 4;
-        r->capable = true;
+        r->alloc_capable = true;
         /*
          * By default, CDP is disabled. CDP can be enabled by mount parameter
          * "cdp" during resctrl file system mount time.
          */
-        r->enabled = false;
+        r->alloc_enabled = false;
 }
 static int get_cache_id(int cpu, int level)
@@ -422,7 +422,7 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
         d->id = id;
-        if (domain_setup_ctrlval(r, d)) {
+        if (r->alloc_capable && domain_setup_ctrlval(r, d)) {
                 kfree(d);
                 return;
         }
@@ -464,7 +464,7 @@ static int intel_rdt_online_cpu(unsigned int cpu)
         struct rdt_resource *r;
         mutex_lock(&rdtgroup_mutex);
-        for_each_capable_rdt_resource(r)
+        for_each_alloc_capable_rdt_resource(r)
                 domain_add_cpu(cpu, r);
         /* The cpu is set in default rdtgroup after online. */
         cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask);
@@ -480,7 +480,7 @@ static int intel_rdt_offline_cpu(unsigned int cpu)
         struct rdt_resource *r;
         mutex_lock(&rdtgroup_mutex);
-        for_each_capable_rdt_resource(r)
+        for_each_alloc_capable_rdt_resource(r)
                 domain_remove_cpu(cpu, r);
         list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
                 if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask))
@@ -501,7 +501,7 @@ static __init void rdt_init_padding(void)
         struct rdt_resource *r;
         int cl;
-        for_each_capable_rdt_resource(r) {
+        for_each_alloc_capable_rdt_resource(r) {
                 cl = strlen(r->name);
                 if (cl > max_name_width)
                         max_name_width = cl;
@@ -565,7 +565,7 @@ static int __init intel_rdt_late_init(void)
                 return ret;
         }
-        for_each_capable_rdt_resource(r)
+        for_each_alloc_capable_rdt_resource(r)
                 pr_info("Intel RDT %s allocation detected\n", r->name);
         return 0;
...
@@ -135,8 +135,8 @@ struct rdt_membw {
 /**
  * struct rdt_resource - attributes of an RDT resource
- * @enabled:            Is this feature enabled on this machine
- * @capable:            Is this feature available on this machine
+ * @alloc_enabled:      Is allocation enabled on this machine
+ * @alloc_capable:      Is allocation available on this machine
  * @name:               Name to use in "schemata" file
  * @num_closid:         Number of CLOSIDs available
  * @cache_level:        Which cache level defines scope of this resource
@@ -152,8 +152,8 @@ struct rdt_membw {
  * @parse_ctrlval:      Per resource function pointer to parse control values
  */
 struct rdt_resource {
-        bool                    enabled;
-        bool                    capable;
+        bool                    alloc_enabled;
+        bool                    alloc_capable;
         char                    *name;
         int                     num_closid;
         int                     cache_level;
@@ -181,7 +181,7 @@ extern struct mutex rdtgroup_mutex;
 extern struct rdt_resource rdt_resources_all[];
 extern struct rdtgroup rdtgroup_default;
-DECLARE_STATIC_KEY_FALSE(rdt_enable_key);
+DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
 int __init rdtgroup_init(void);
@@ -196,15 +196,15 @@ enum {
         RDT_NUM_RESOURCES,
 };
-#define for_each_capable_rdt_resource(r)                                      \
+#define for_each_alloc_capable_rdt_resource(r)                                \
         for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
              r++)                                                             \
-                if (r->capable)
+                if (r->alloc_capable)
-#define for_each_enabled_rdt_resource(r)                                      \
+#define for_each_alloc_enabled_rdt_resource(r)                                \
         for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
              r++)                                                             \
-                if (r->enabled)
+                if (r->alloc_enabled)
 /* CPUID.(EAX=10H, ECX=ResID=1).EAX */
 union cpuid_0x10_1_eax {
...
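Both renamed macros above expand to a for loop with a trailing if, so the single statement (or block) the caller supplies runs only for resources whose flag is set, e.g. for_each_alloc_capable_rdt_resource(r) domain_add_cpu(cpu, r);. Below is a self-contained user-space sketch of that shape; struct res, resources[] and for_each_alloc_capable_res are simplified stand-ins, not the kernel's rdt_resource machinery:

#include <stdio.h>
#include <stdbool.h>

struct res {
        const char *name;
        bool alloc_capable;
};

static struct res resources[] = {
        { "L3", true  },
        { "L2", false },
        { "MB", true  },
};

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Same shape as for_each_alloc_capable_rdt_resource(): the trailing "if"
 * filters the loop body, so only flagged entries reach the caller's code. */
#define for_each_alloc_capable_res(r)                                        \
        for (r = resources; r < resources + ARRAY_SIZE(resources); r++)      \
                if (r->alloc_capable)

int main(void)
{
        struct res *r;

        for_each_alloc_capable_res(r)
                printf("Intel RDT %s allocation detected\n", r->name);

        return 0;
}

Compiled and run, this prints the detection line only for the L3 and MB entries, mirroring how intel_rdt_late_init() reports only allocation-capable resources.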
@@ -35,7 +35,7 @@
 #include <asm/intel_rdt_sched.h>
 #include "intel_rdt.h"
-DEFINE_STATIC_KEY_FALSE(rdt_enable_key);
+DEFINE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
 static struct kernfs_root *rdt_root;
 struct rdtgroup rdtgroup_default;
 LIST_HEAD(rdt_all_groups);
@@ -66,7 +66,7 @@ static void closid_init(void)
         int rdt_min_closid = 32;
         /* Compute rdt_min_closid across all resources */
-        for_each_enabled_rdt_resource(r)
+        for_each_alloc_enabled_rdt_resource(r)
                 rdt_min_closid = min(rdt_min_closid, r->num_closid);
         closid_free_map = BIT_MASK(rdt_min_closid) - 1;
@@ -638,7 +638,7 @@ static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
                 return PTR_ERR(kn_info);
         kernfs_get(kn_info);
-        for_each_enabled_rdt_resource(r) {
+        for_each_alloc_enabled_rdt_resource(r) {
                 kn_subdir = kernfs_create_dir(kn_info, r->name,
                                               kn_info->mode, r);
                 if (IS_ERR(kn_subdir)) {
@@ -718,14 +718,15 @@ static int cdp_enable(void)
         struct rdt_resource *r_l3 = &rdt_resources_all[RDT_RESOURCE_L3];
         int ret;
-        if (!r_l3->capable || !r_l3data->capable || !r_l3code->capable)
+        if (!r_l3->alloc_capable || !r_l3data->alloc_capable ||
+            !r_l3code->alloc_capable)
                 return -EINVAL;
         ret = set_l3_qos_cfg(r_l3, true);
         if (!ret) {
-                r_l3->enabled = false;
-                r_l3data->enabled = true;
-                r_l3code->enabled = true;
+                r_l3->alloc_enabled = false;
+                r_l3data->alloc_enabled = true;
+                r_l3code->alloc_enabled = true;
         }
         return ret;
 }
@@ -734,11 +735,11 @@ static void cdp_disable(void)
 {
         struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3];
-        r->enabled = r->capable;
+        r->alloc_enabled = r->alloc_capable;
-        if (rdt_resources_all[RDT_RESOURCE_L3DATA].enabled) {
-                rdt_resources_all[RDT_RESOURCE_L3DATA].enabled = false;
-                rdt_resources_all[RDT_RESOURCE_L3CODE].enabled = false;
+        if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled) {
+                rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled = false;
+                rdt_resources_all[RDT_RESOURCE_L3CODE].alloc_enabled = false;
                 set_l3_qos_cfg(r, false);
         }
 }
@@ -834,7 +835,7 @@ static struct dentry *rdt_mount(struct file_system_type *fs_type,
         /*
          * resctrl file system can only be mounted once.
          */
-        if (static_branch_unlikely(&rdt_enable_key)) {
+        if (static_branch_unlikely(&rdt_alloc_enable_key)) {
                 dentry = ERR_PTR(-EBUSY);
                 goto out;
         }
@@ -858,7 +859,7 @@ static struct dentry *rdt_mount(struct file_system_type *fs_type,
         if (IS_ERR(dentry))
                 goto out_destroy;
-        static_branch_enable(&rdt_enable_key);
+        static_branch_enable(&rdt_alloc_enable_key);
         goto out;
 out_destroy:
@@ -986,11 +987,11 @@ static void rdt_kill_sb(struct super_block *sb)
         mutex_lock(&rdtgroup_mutex);
         /*Put everything back to default values. */
-        for_each_enabled_rdt_resource(r)
+        for_each_alloc_enabled_rdt_resource(r)
                 reset_all_ctrls(r);
         cdp_disable();
         rmdir_all_sub();
-        static_branch_disable(&rdt_enable_key);
+        static_branch_disable(&rdt_alloc_enable_key);
         kernfs_kill_sb(sb);
         mutex_unlock(&rdtgroup_mutex);
 }
@@ -1129,7 +1130,7 @@ static int rdtgroup_rmdir(struct kernfs_node *kn)
 static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf)
 {
-        if (rdt_resources_all[RDT_RESOURCE_L3DATA].enabled)
+        if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled)
                 seq_puts(seq, ",cdp");
         return 0;
 }
...
@@ -192,7 +192,7 @@ static int rdtgroup_parse_resource(char *resname, char *tok, int closid)
 {
         struct rdt_resource *r;
-        for_each_enabled_rdt_resource(r) {
+        for_each_alloc_enabled_rdt_resource(r) {
                 if (!strcmp(resname, r->name) && closid < r->num_closid)
                         return parse_line(tok, r);
         }
@@ -221,7 +221,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
         closid = rdtgrp->closid;
-        for_each_enabled_rdt_resource(r) {
+        for_each_alloc_enabled_rdt_resource(r) {
                 list_for_each_entry(dom, &r->domains, list)
                         dom->have_new_ctrl = false;
         }
@@ -237,7 +237,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
                 goto out;
         }
-        for_each_enabled_rdt_resource(r) {
+        for_each_alloc_enabled_rdt_resource(r) {
                 ret = update_domains(r, closid);
                 if (ret)
                         goto out;
@@ -274,7 +274,7 @@ int rdtgroup_schemata_show(struct kernfs_open_file *of,
         rdtgrp = rdtgroup_kn_lock_live(of->kn);
         if (rdtgrp) {
                 closid = rdtgrp->closid;
-                for_each_enabled_rdt_resource(r) {
+                for_each_alloc_enabled_rdt_resource(r) {
                         if (closid < r->num_closid)
                                 show_doms(s, r, closid);
                 }
...