Commit 9ab021a1 authored by Linus Torvalds

Merge tag 'x86_cache_for_6.7_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 resource control updates from Borislav Petkov:

 - Add support for non-contiguous capacity bitmasks in Intel's CAT
   implementation

 - Other improvements to resctrl code: better configuration,
   simplifications, debugging support, fixes

* tag 'x86_cache_for_6.7_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/resctrl: Display RMID of resource group
  x86/resctrl: Add support for the files of MON groups only
  x86/resctrl: Display CLOSID for resource group
  x86/resctrl: Introduce "-o debug" mount option
  x86/resctrl: Move default group file creation to mount
  x86/resctrl: Unwind properly from rdt_enable_ctx()
  x86/resctrl: Rename rftype flags for consistency
  x86/resctrl: Simplify rftype flag definitions
  x86/resctrl: Add multiple tasks to the resctrl group at once
  Documentation/x86: Document resctrl's new sparse_masks
  x86/resctrl: Add sparse_masks file in info
  x86/resctrl: Enable non-contiguous CBMs in Intel CAT
  x86/resctrl: Rename arch_has_sparse_bitmaps
  x86/resctrl: Fix remaining kernel-doc warnings
parents f84a52ee 4cee14bc
@@ -35,7 +35,7 @@ about the feature from resctrl's info directory.
To use the feature mount the file system::
-# mount -t resctrl resctrl [-o cdp[,cdpl2][,mba_MBps]] /sys/fs/resctrl
+# mount -t resctrl resctrl [-o cdp[,cdpl2][,mba_MBps][,debug]] /sys/fs/resctrl
mount options are:
@@ -46,6 +46,9 @@ mount options are:
"mba_MBps":
Enable the MBA Software Controller(mba_sc) to specify MBA
bandwidth in MBps
+"debug":
+Make debug files accessible. Available debug files are annotated with
+"Available only with debug option".
L2 and L3 CDP are controlled separately.
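To make the new option concrete, here is a minimal usage sketch (it assumes resctrl-capable hardware and that nothing is currently mounted on /sys/fs/resctrl; the exact set of debug files depends on the platform):

    # Mount resctrl with the new "debug" option; files annotated
    # "Available only with debug option" (e.g. ctrl_hw_id, mon_hw_id) appear.
    mount -t resctrl resctrl -o debug /sys/fs/resctrl
    cat /sys/fs/resctrl/ctrl_hw_id    # CLOSID of the default group on x86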
@@ -124,6 +127,13 @@ related to allocation:
"P":
Corresponding region is pseudo-locked. No
sharing allowed.
+"sparse_masks":
+Indicates if non-contiguous 1s value in CBM is supported.
+"0":
+Only contiguous 1s value in CBM is supported.
+"1":
+Non-contiguous 1s value in CBM is supported.
Memory bandwidth(MB) subdirectory contains the following files
with respect to allocation:
@@ -299,7 +309,14 @@ All groups contain the following files:
"tasks":
Reading this file shows the list of all tasks that belong to
this group. Writing a task id to the file will add a task to the
-group. If the group is a CTRL_MON group the task is removed from
+group. Multiple tasks can be added by separating the task ids
+with commas. Tasks will be assigned sequentially. Multiple
+failures are not supported. A single failure encountered while
+attempting to assign a task will cause the operation to abort and
+already added tasks before the failure will remain in the group.
+Failures will be logged to /sys/fs/resctrl/info/last_cmd_status.
+If the group is a CTRL_MON group the task is removed from
whichever previous CTRL_MON group owned the task and also from
any MON group that owned the task. If the group is a MON group,
then the task must already belong to the CTRL_MON parent of this
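The comma-separated form documented above can be exercised like this (a usage sketch; the group name grp1 and the PIDs are hypothetical):

    # Move three tasks into an existing resource group with one write.
    echo "1234,5678,9012" > /sys/fs/resctrl/grp1/tasks
    # If one assignment fails, the write aborts; PIDs moved before the
    # failure stay in the group and the reason is recorded here:
    cat /sys/fs/resctrl/info/last_cmd_status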
@@ -342,6 +359,10 @@ When control is enabled all CTRL_MON groups will also contain:
file. On successful pseudo-locked region creation the mode will
automatically change to "pseudo-locked".
+"ctrl_hw_id":
+Available only with debug option. The identifier used by hardware
+for the control group. On x86 this is the CLOSID.
When monitoring is enabled all MON groups will also contain:
"mon_data":
@@ -355,6 +376,10 @@ When monitoring is enabled all MON groups will also contain:
the sum for all tasks in the CTRL_MON group and all tasks in
MON groups. Please see example section for more details on usage.
+"mon_hw_id":
+Available only with debug option. The identifier used by hardware
+for the monitor group. On x86 this is the RMID.
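With a debug mount the two identifiers can simply be read back (a sketch; the group names grp1 and m1 are hypothetical, the values depend on the hardware):

    cat /sys/fs/resctrl/grp1/ctrl_hw_id               # CLOSID backing grp1
    cat /sys/fs/resctrl/grp1/mon_groups/m1/mon_hw_id  # RMID backing MON group m1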
Resource allocation rules
-------------------------
@@ -445,12 +470,13 @@ For cache resources we describe the portion of the cache that is available
for allocation using a bitmask. The maximum value of the mask is defined
by each cpu model (and may be different for different cache levels). It
is found using CPUID, but is also provided in the "info" directory of
-the resctrl file system in "info/{resource}/cbm_mask". Intel hardware
+the resctrl file system in "info/{resource}/cbm_mask". Some Intel hardware
requires that these masks have all the '1' bits in a contiguous block. So
0x3, 0x6 and 0xC are legal 4-bit masks with two bits set, but 0x5, 0x9
-and 0xA are not. On a system with a 20-bit mask each bit represents 5%
-of the capacity of the cache. You could partition the cache into four
-equal parts with masks: 0x1f, 0x3e0, 0x7c00, 0xf8000.
+and 0xA are not. Check /sys/fs/resctrl/info/{resource}/sparse_masks
+if non-contiguous 1s value is supported. On a system with a 20-bit mask
+each bit represents 5% of the capacity of the cache. You could partition
+the cache into four equal parts with masks: 0x1f, 0x3e0, 0x7c00, 0xf8000.
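The check described above looks like this in practice (a sketch; the L3 resource, domain id 0, group name grp1 and mask value are illustrative):

    cat /sys/fs/resctrl/info/L3/sparse_masks
    # If it prints 1, a capacity bitmask with a hole is accepted, e.g.:
    echo "L3:0=3c0f" > /sys/fs/resctrl/grp1/schemata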
Memory bandwidth Allocation and monitoring
==========================================
......
@@ -152,6 +152,7 @@ static inline void cache_alloc_hsw_probe(void)
r->cache.cbm_len = 20;
r->cache.shareable_bits = 0xc0000;
r->cache.min_cbm_bits = 2;
+r->cache.arch_has_sparse_bitmasks = false;
r->alloc_capable = true;
rdt_alloc_capable = true;
@@ -267,15 +268,18 @@ static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r)
{
struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
union cpuid_0x10_1_eax eax;
+union cpuid_0x10_x_ecx ecx;
union cpuid_0x10_x_edx edx;
-u32 ebx, ecx;
+u32 ebx;
-cpuid_count(0x00000010, idx, &eax.full, &ebx, &ecx, &edx.full);
+cpuid_count(0x00000010, idx, &eax.full, &ebx, &ecx.full, &edx.full);
hw_res->num_closid = edx.split.cos_max + 1;
r->cache.cbm_len = eax.split.cbm_len + 1;
r->default_ctrl = BIT_MASK(eax.split.cbm_len + 1) - 1;
r->cache.shareable_bits = ebx & r->default_ctrl;
r->data_width = (r->cache.cbm_len + 3) / 4;
+if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+r->cache.arch_has_sparse_bitmasks = ecx.split.noncont;
r->alloc_capable = true;
}
@@ -872,7 +876,6 @@ static __init void rdt_init_res_defs_intel(void)
if (r->rid == RDT_RESOURCE_L3 ||
r->rid == RDT_RESOURCE_L2) {
-r->cache.arch_has_sparse_bitmaps = false;
r->cache.arch_has_per_cpu_cfg = false;
r->cache.min_cbm_bits = 1;
} else if (r->rid == RDT_RESOURCE_MBA) {
@@ -892,7 +895,7 @@ static __init void rdt_init_res_defs_amd(void)
if (r->rid == RDT_RESOURCE_L3 ||
r->rid == RDT_RESOURCE_L2) {
-r->cache.arch_has_sparse_bitmaps = true;
+r->cache.arch_has_sparse_bitmasks = true;
r->cache.arch_has_per_cpu_cfg = true;
r->cache.min_cbm_bits = 0;
} else if (r->rid == RDT_RESOURCE_MBA) {
......
@@ -87,10 +87,12 @@ int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s,
/*
* Check whether a cache bit mask is valid.
-* For Intel the SDM says:
-* Please note that all (and only) contiguous '1' combinations
-* are allowed (e.g. FFFFH, 0FF0H, 003CH, etc.).
-* Additionally Haswell requires at least two bits set.
+* On Intel CPUs, non-contiguous 1s value support is indicated by CPUID:
+* - CPUID.0x10.1:ECX[3]: L3 non-contiguous 1s value supported if 1
+* - CPUID.0x10.2:ECX[3]: L2 non-contiguous 1s value supported if 1
+*
+* Haswell does not support a non-contiguous 1s value and additionally
+* requires at least two bits set.
* AMD allows non-contiguous bitmasks.
*/
static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
@@ -113,8 +115,8 @@ static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
first_bit = find_first_bit(&val, cbm_len);
zero_bit = find_next_zero_bit(&val, cbm_len, first_bit);
-/* Are non-contiguous bitmaps allowed? */
-if (!r->cache.arch_has_sparse_bitmaps &&
+/* Are non-contiguous bitmasks allowed? */
+if (!r->cache.arch_has_sparse_bitmasks &&
(find_next_bit(&val, cbm_len, zero_bit) < cbm_len)) {
rdt_last_cmd_printf("The mask %lx has non-consecutive 1-bits\n", val);
return false;
......
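When the hardware does not support sparse masks, the rejection in cbm_validate() is visible from user space; a small sketch (mask value and group name grp1 are illustrative, and it assumes sparse_masks reads 0):

    # 0x5 has non-consecutive 1-bits, so the write fails ...
    echo "L3:0=5" > /sys/fs/resctrl/grp1/schemata
    # ... and the message printed by cbm_validate() is recorded here:
    cat /sys/fs/resctrl/info/last_cmd_status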
@@ -59,6 +59,7 @@ struct rdt_fs_context {
bool enable_cdpl2;
bool enable_cdpl3;
bool enable_mba_mbps;
+bool enable_debug;
};
static inline struct rdt_fs_context *rdt_fc2context(struct fs_context *fc)
@@ -243,18 +244,17 @@ struct rdtgroup {
*/
#define RFTYPE_INFO BIT(0)
#define RFTYPE_BASE BIT(1)
-#define RF_CTRLSHIFT 4
-#define RF_MONSHIFT 5
-#define RF_TOPSHIFT 6
-#define RFTYPE_CTRL BIT(RF_CTRLSHIFT)
-#define RFTYPE_MON BIT(RF_MONSHIFT)
-#define RFTYPE_TOP BIT(RF_TOPSHIFT)
+#define RFTYPE_CTRL BIT(4)
+#define RFTYPE_MON BIT(5)
+#define RFTYPE_TOP BIT(6)
#define RFTYPE_RES_CACHE BIT(8)
#define RFTYPE_RES_MB BIT(9)
-#define RF_CTRL_INFO (RFTYPE_INFO | RFTYPE_CTRL)
-#define RF_MON_INFO (RFTYPE_INFO | RFTYPE_MON)
-#define RF_TOP_INFO (RFTYPE_INFO | RFTYPE_TOP)
-#define RF_CTRL_BASE (RFTYPE_BASE | RFTYPE_CTRL)
+#define RFTYPE_DEBUG BIT(10)
+#define RFTYPE_CTRL_INFO (RFTYPE_INFO | RFTYPE_CTRL)
+#define RFTYPE_MON_INFO (RFTYPE_INFO | RFTYPE_MON)
+#define RFTYPE_TOP_INFO (RFTYPE_INFO | RFTYPE_TOP)
+#define RFTYPE_CTRL_BASE (RFTYPE_BASE | RFTYPE_CTRL)
+#define RFTYPE_MON_BASE (RFTYPE_BASE | RFTYPE_MON)
/* List of all resource groups */
extern struct list_head rdt_all_groups;
@@ -270,7 +270,7 @@ void __exit rdtgroup_exit(void);
* @mode: Access mode
* @kf_ops: File operations
* @flags: File specific RFTYPE_FLAGS_* flags
-* @fflags: File specific RF_* or RFTYPE_* flags
+* @fflags: File specific RFTYPE_* flags
* @seq_show: Show content of the file
* @write: Write to the file
*/
@@ -492,6 +492,15 @@ union cpuid_0x10_3_eax {
unsigned int full;
};
+/* CPUID.(EAX=10H, ECX=ResID).ECX */
+union cpuid_0x10_x_ecx {
+struct {
+unsigned int reserved:3;
+unsigned int noncont:1;
+} split;
+unsigned int full;
+};
/* CPUID.(EAX=10H, ECX=ResID).EDX */
union cpuid_0x10_x_edx {
struct {
......
@@ -54,8 +54,13 @@ static struct kernfs_node *kn_mondata;
static struct seq_buf last_cmd_status;
static char last_cmd_status_buf[512];
+static int rdtgroup_setup_root(struct rdt_fs_context *ctx);
+static void rdtgroup_destroy_root(void);
struct dentry *debugfs_resctrl;
+static bool resctrl_debug;
void rdt_last_cmd_clear(void)
{
lockdep_assert_held(&rdtgroup_mutex);
@@ -696,11 +701,10 @@ static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off)
{
struct rdtgroup *rdtgrp;
+char *pid_str;
int ret = 0;
pid_t pid;
-if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
-return -EINVAL;
rdtgrp = rdtgroup_kn_lock_live(of->kn);
if (!rdtgrp) {
rdtgroup_kn_unlock(of->kn);
@@ -715,7 +719,27 @@ static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of,
goto unlock;
}
+while (buf && buf[0] != '\0' && buf[0] != '\n') {
+pid_str = strim(strsep(&buf, ","));
+if (kstrtoint(pid_str, 0, &pid)) {
+rdt_last_cmd_printf("Task list parsing error pid %s\n", pid_str);
+ret = -EINVAL;
+break;
+}
+if (pid < 0) {
+rdt_last_cmd_printf("Invalid pid %d\n", pid);
+ret = -EINVAL;
+break;
+}
ret = rdtgroup_move_task(pid, rdtgrp, of);
+if (ret) {
+rdt_last_cmd_printf("Error while processing task %d\n", pid);
+break;
+}
+}
unlock:
rdtgroup_kn_unlock(of->kn);
@@ -755,6 +779,38 @@ static int rdtgroup_tasks_show(struct kernfs_open_file *of,
return ret;
}
+static int rdtgroup_closid_show(struct kernfs_open_file *of,
+struct seq_file *s, void *v)
+{
+struct rdtgroup *rdtgrp;
+int ret = 0;
+rdtgrp = rdtgroup_kn_lock_live(of->kn);
+if (rdtgrp)
+seq_printf(s, "%u\n", rdtgrp->closid);
+else
+ret = -ENOENT;
+rdtgroup_kn_unlock(of->kn);
+return ret;
+}
+static int rdtgroup_rmid_show(struct kernfs_open_file *of,
+struct seq_file *s, void *v)
+{
+struct rdtgroup *rdtgrp;
+int ret = 0;
+rdtgrp = rdtgroup_kn_lock_live(of->kn);
+if (rdtgrp)
+seq_printf(s, "%u\n", rdtgrp->mon.rmid);
+else
+ret = -ENOENT;
+rdtgroup_kn_unlock(of->kn);
+return ret;
+}
#ifdef CONFIG_PROC_CPU_RESCTRL
/*
@@ -895,7 +951,7 @@ static int rdt_shareable_bits_show(struct kernfs_open_file *of,
return 0;
}
-/**
+/*
* rdt_bit_usage_show - Display current usage of resources
*
* A domain is a shared resource that can now be allocated differently. Here
@@ -1117,12 +1173,24 @@ static enum resctrl_conf_type resctrl_peer_type(enum resctrl_conf_type my_type)
}
}
+static int rdt_has_sparse_bitmasks_show(struct kernfs_open_file *of,
+struct seq_file *seq, void *v)
+{
+struct resctrl_schema *s = of->kn->parent->priv;
+struct rdt_resource *r = s->res;
+seq_printf(seq, "%u\n", r->cache.arch_has_sparse_bitmasks);
+return 0;
+}
/**
* __rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with other
* @r: Resource to which domain instance @d belongs.
* @d: The domain instance for which @closid is being tested.
* @cbm: Capacity bitmask being tested.
* @closid: Intended closid for @cbm.
+* @type: CDP type of @r.
* @exclusive: Only check if overlaps with exclusive resource groups
*
* Checks if provided @cbm intended to be used for @closid on domain
@@ -1209,6 +1277,7 @@ bool rdtgroup_cbm_overlaps(struct resctrl_schema *s, struct rdt_domain *d,
/**
* rdtgroup_mode_test_exclusive - Test if this resource group can be exclusive
+* @rdtgrp: Resource group identified through its closid.
*
* An exclusive resource group implies that there should be no sharing of
* its allocated resources. At the time this group is considered to be
@@ -1251,9 +1320,8 @@ static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp)
return true;
}
-/**
+/*
* rdtgroup_mode_write - Modify the resource group's mode
-*
*/
static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off)
@@ -1357,12 +1425,11 @@ unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r,
return size;
}
-/**
+/*
* rdtgroup_size_show - Display size in bytes of allocated regions
*
* The "size" file mirrors the layout of the "schemata" file, printing the
* size in bytes of each region instead of the capacity bitmask.
-*
*/
static int rdtgroup_size_show(struct kernfs_open_file *of,
struct seq_file *s, void *v)
@@ -1686,77 +1753,77 @@ static struct rftype res_common_files[] = {
.mode = 0444,
.kf_ops = &rdtgroup_kf_single_ops,
.seq_show = rdt_last_cmd_status_show,
-.fflags = RF_TOP_INFO,
+.fflags = RFTYPE_TOP_INFO,
},
{
.name = "num_closids",
.mode = 0444,
.kf_ops = &rdtgroup_kf_single_ops,
.seq_show = rdt_num_closids_show,
-.fflags = RF_CTRL_INFO,
+.fflags = RFTYPE_CTRL_INFO,
},
{
.name = "mon_features",
.mode = 0444,
.kf_ops = &rdtgroup_kf_single_ops,
.seq_show = rdt_mon_features_show,
-.fflags = RF_MON_INFO,
+.fflags = RFTYPE_MON_INFO,
},
{
.name = "num_rmids",
.mode = 0444,
.kf_ops = &rdtgroup_kf_single_ops,
.seq_show = rdt_num_rmids_show,
-.fflags = RF_MON_INFO,
+.fflags = RFTYPE_MON_INFO,
},
{
.name = "cbm_mask",
.mode = 0444,
.kf_ops = &rdtgroup_kf_single_ops,
.seq_show = rdt_default_ctrl_show,
-.fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE,
+.fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE,
},
{
.name = "min_cbm_bits",
.mode = 0444,
.kf_ops = &rdtgroup_kf_single_ops,
.seq_show = rdt_min_cbm_bits_show,
-.fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE,
+.fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE,
},
{
.name = "shareable_bits",
.mode = 0444,
.kf_ops = &rdtgroup_kf_single_ops,
.seq_show = rdt_shareable_bits_show,
-.fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE,
+.fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE,
},
{
.name = "bit_usage",
.mode = 0444,
.kf_ops = &rdtgroup_kf_single_ops,
.seq_show = rdt_bit_usage_show,
-.fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE,
+.fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE,
},
{
.name = "min_bandwidth",
.mode = 0444,
.kf_ops = &rdtgroup_kf_single_ops,
.seq_show = rdt_min_bw_show,
-.fflags = RF_CTRL_INFO | RFTYPE_RES_MB,
+.fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_MB,
},
{
.name = "bandwidth_gran",
.mode = 0444,
.kf_ops = &rdtgroup_kf_single_ops,
.seq_show = rdt_bw_gran_show,
-.fflags = RF_CTRL_INFO | RFTYPE_RES_MB,
+.fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_MB,
},
{
.name = "delay_linear",
.mode = 0444,
.kf_ops = &rdtgroup_kf_single_ops,
.seq_show = rdt_delay_linear_show,
-.fflags = RF_CTRL_INFO | RFTYPE_RES_MB,
+.fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_MB,
},
/*
* Platform specific which (if any) capabilities are provided by
@@ -1775,7 +1842,7 @@ static struct rftype res_common_files[] = {
.kf_ops = &rdtgroup_kf_single_ops,
.write = max_threshold_occ_write,
.seq_show = max_threshold_occ_show,
-.fflags = RF_MON_INFO | RFTYPE_RES_CACHE,
+.fflags = RFTYPE_MON_INFO | RFTYPE_RES_CACHE,
},
{
.name = "mbm_total_bytes_config",
@@ -1816,13 +1883,20 @@ static struct rftype res_common_files[] = {
.seq_show = rdtgroup_tasks_show,
.fflags = RFTYPE_BASE,
},
+{
+.name = "mon_hw_id",
+.mode = 0444,
+.kf_ops = &rdtgroup_kf_single_ops,
+.seq_show = rdtgroup_rmid_show,
+.fflags = RFTYPE_MON_BASE | RFTYPE_DEBUG,
+},
{
.name = "schemata",
.mode = 0644,
.kf_ops = &rdtgroup_kf_single_ops,
.write = rdtgroup_schemata_write,
.seq_show = rdtgroup_schemata_show,
-.fflags = RF_CTRL_BASE,
+.fflags = RFTYPE_CTRL_BASE,
},
{
.name = "mode",
@@ -1830,14 +1904,28 @@ static struct rftype res_common_files[] = {
.kf_ops = &rdtgroup_kf_single_ops,
.write = rdtgroup_mode_write,
.seq_show = rdtgroup_mode_show,
-.fflags = RF_CTRL_BASE,
+.fflags = RFTYPE_CTRL_BASE,
},
{
.name = "size",
.mode = 0444,
.kf_ops = &rdtgroup_kf_single_ops,
.seq_show = rdtgroup_size_show,
-.fflags = RF_CTRL_BASE,
+.fflags = RFTYPE_CTRL_BASE,
+},
+{
+.name = "sparse_masks",
+.mode = 0444,
+.kf_ops = &rdtgroup_kf_single_ops,
+.seq_show = rdt_has_sparse_bitmasks_show,
+.fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE,
+},
+{
+.name = "ctrl_hw_id",
+.mode = 0444,
+.kf_ops = &rdtgroup_kf_single_ops,
+.seq_show = rdtgroup_closid_show,
+.fflags = RFTYPE_CTRL_BASE | RFTYPE_DEBUG,
},
};
@@ -1852,6 +1940,9 @@ static int rdtgroup_add_files(struct kernfs_node *kn, unsigned long fflags)
lockdep_assert_held(&rdtgroup_mutex);
+if (resctrl_debug)
+fflags |= RFTYPE_DEBUG;
for (rft = rfts; rft < rfts + len; rft++) {
if (rft->fflags && ((fflags & rft->fflags) == rft->fflags)) {
ret = rdtgroup_add_file(kn, rft);
@@ -1894,7 +1985,7 @@ void __init thread_throttle_mode_init(void)
if (!rft)
return;
-rft->fflags = RF_CTRL_INFO | RFTYPE_RES_MB;
+rft->fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_MB;
}
void __init mbm_config_rftype_init(const char *config)
@@ -1903,7 +1994,7 @@ void __init mbm_config_rftype_init(const char *config)
rft = rdtgroup_get_rftype_by_name(config);
if (rft)
-rft->fflags = RF_MON_INFO | RFTYPE_RES_CACHE;
+rft->fflags = RFTYPE_MON_INFO | RFTYPE_RES_CACHE;
}
/**
@@ -2038,21 +2129,21 @@ static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
if (IS_ERR(kn_info))
return PTR_ERR(kn_info);
-ret = rdtgroup_add_files(kn_info, RF_TOP_INFO);
+ret = rdtgroup_add_files(kn_info, RFTYPE_TOP_INFO);
if (ret)
goto out_destroy;
/* loop over enabled controls, these are all alloc_capable */
list_for_each_entry(s, &resctrl_schema_all, list) {
r = s->res;
-fflags = r->fflags | RF_CTRL_INFO;
+fflags = r->fflags | RFTYPE_CTRL_INFO;
ret = rdtgroup_mkdir_info_resdir(s, s->name, fflags);
if (ret)
goto out_destroy;
}
for_each_mon_capable_rdt_resource(r) {
-fflags = r->fflags | RF_MON_INFO;
+fflags = r->fflags | RFTYPE_MON_INFO;
sprintf(name, "%s_MON", r->name);
ret = rdtgroup_mkdir_info_resdir(r, name, fflags);
if (ret)
@@ -2271,14 +2362,6 @@ int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable)
return 0;
}
-static void cdp_disable_all(void)
-{
-if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3))
-resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, false);
-if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2))
-resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, false);
-}
/*
* We don't allow rdtgroup directories to be created anywhere
* except the root directory. Thus when looking for the rdtgroup
@@ -2358,19 +2441,47 @@ static int mkdir_mondata_all(struct kernfs_node *parent_kn,
struct rdtgroup *prgrp,
struct kernfs_node **mon_data_kn);
+static void rdt_disable_ctx(void)
+{
+resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, false);
+resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, false);
+set_mba_sc(false);
+resctrl_debug = false;
+}
static int rdt_enable_ctx(struct rdt_fs_context *ctx)
{
int ret = 0;
-if (ctx->enable_cdpl2)
+if (ctx->enable_cdpl2) {
ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, true);
+if (ret)
+goto out_done;
+}
-if (!ret && ctx->enable_cdpl3)
+if (ctx->enable_cdpl3) {
ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, true);
+if (ret)
+goto out_cdpl2;
+}
-if (!ret && ctx->enable_mba_mbps)
+if (ctx->enable_mba_mbps) {
ret = set_mba_sc(true);
+if (ret)
+goto out_cdpl3;
+}
+if (ctx->enable_debug)
+resctrl_debug = true;
+return 0;
+out_cdpl3:
+resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, false);
+out_cdpl2:
+resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, false);
+out_done:
return ret;
}
@@ -2463,6 +2574,7 @@ static void schemata_list_destroy(void)
static int rdt_get_tree(struct fs_context *fc)
{
struct rdt_fs_context *ctx = rdt_fc2context(fc);
+unsigned long flags = RFTYPE_CTRL_BASE;
struct rdt_domain *dom;
struct rdt_resource *r;
int ret;
@@ -2477,18 +2589,31 @@ static int rdt_get_tree(struct fs_context *fc)
goto out;
}
+ret = rdtgroup_setup_root(ctx);
+if (ret)
+goto out;
ret = rdt_enable_ctx(ctx);
-if (ret < 0)
-goto out_cdp;
+if (ret)
+goto out_root;
ret = schemata_list_create();
if (ret) {
schemata_list_destroy();
-goto out_mba;
+goto out_ctx;
}
closid_init();
+if (rdt_mon_capable)
+flags |= RFTYPE_MON;
+ret = rdtgroup_add_files(rdtgroup_default.kn, flags);
+if (ret)
+goto out_schemata_free;
+kernfs_activate(rdtgroup_default.kn);
ret = rdtgroup_create_info_dir(rdtgroup_default.kn);
if (ret < 0)
goto out_schemata_free;
@@ -2543,11 +2668,10 @@ static int rdt_get_tree(struct fs_context *fc)
kernfs_remove(kn_info);
out_schemata_free:
schemata_list_destroy();
-out_mba:
-if (ctx->enable_mba_mbps)
-set_mba_sc(false);
-out_cdp:
-cdp_disable_all();
+out_ctx:
+rdt_disable_ctx();
+out_root:
+rdtgroup_destroy_root();
out:
rdt_last_cmd_clear();
mutex_unlock(&rdtgroup_mutex);
@@ -2559,6 +2683,7 @@ enum rdt_param {
Opt_cdp,
Opt_cdpl2,
Opt_mba_mbps,
+Opt_debug,
nr__rdt_params
};
@@ -2566,6 +2691,7 @@ static const struct fs_parameter_spec rdt_fs_parameters[] = {
fsparam_flag("cdp", Opt_cdp),
fsparam_flag("cdpl2", Opt_cdpl2),
fsparam_flag("mba_MBps", Opt_mba_mbps),
+fsparam_flag("debug", Opt_debug),
{}
};
@@ -2591,6 +2717,9 @@ static int rdt_parse_param(struct fs_context *fc, struct fs_parameter *param)
return -EINVAL;
ctx->enable_mba_mbps = true;
return 0;
+case Opt_debug:
+ctx->enable_debug = true;
+return 0;
}
return -EINVAL;
@@ -2618,7 +2747,6 @@ static int rdt_init_fs_context(struct fs_context *fc)
if (!ctx)
return -ENOMEM;
-ctx->kfc.root = rdt_root;
ctx->kfc.magic = RDTGROUP_SUPER_MAGIC;
fc->fs_private = &ctx->kfc;
fc->ops = &rdt_fs_context_ops;
@@ -2779,16 +2907,16 @@ static void rdt_kill_sb(struct super_block *sb)
cpus_read_lock();
mutex_lock(&rdtgroup_mutex);
-set_mba_sc(false);
+rdt_disable_ctx();
/*Put everything back to default values. */
for_each_alloc_capable_rdt_resource(r)
reset_all_ctrls(r);
-cdp_disable_all();
rmdir_all_sub();
rdt_pseudo_lock_release();
rdtgroup_default.mode = RDT_MODE_SHAREABLE;
schemata_list_destroy();
+rdtgroup_destroy_root();
static_branch_disable_cpuslocked(&rdt_alloc_enable_key);
static_branch_disable_cpuslocked(&rdt_mon_enable_key);
static_branch_disable_cpuslocked(&rdt_enable_key);
@@ -3170,8 +3298,8 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
enum rdt_group_type rtype, struct rdtgroup **r)
{
struct rdtgroup *prdtgrp, *rdtgrp;
-unsigned long files = 0;
struct kernfs_node *kn;
+uint files = 0;
int ret;
prdtgrp = rdtgroup_kn_lock_live(parent_kn);
@@ -3223,7 +3351,14 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
goto out_destroy;
}
-files = RFTYPE_BASE | BIT(RF_CTRLSHIFT + rtype);
+if (rtype == RDTCTRL_GROUP) {
+files = RFTYPE_BASE | RFTYPE_CTRL;
+if (rdt_mon_capable)
+files |= RFTYPE_MON;
+} else {
+files = RFTYPE_BASE | RFTYPE_MON;
+}
ret = rdtgroup_add_files(kn, files);
if (ret) {
rdt_last_cmd_puts("kernfs fill error\n");
@@ -3656,6 +3791,9 @@ static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf)
if (is_mba_sc(&rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl))
seq_puts(seq, ",mba_MBps");
+if (resctrl_debug)
+seq_puts(seq, ",debug");
return 0;
}
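Since rdtgroup_show_options() now reports the flag, an active debug mount can be confirmed from the mount table (a small sketch; output formatting depends on the mount utility):

    # The "debug" flag appears alongside any cdp/cdpl2/mba_MBps options.
    grep resctrl /proc/mounts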
@@ -3666,10 +3804,8 @@ static struct kernfs_syscall_ops rdtgroup_kf_syscall_ops = {
.show_options = rdtgroup_show_options,
};
-static int __init rdtgroup_setup_root(void)
+static int rdtgroup_setup_root(struct rdt_fs_context *ctx)
{
-int ret;
rdt_root = kernfs_create_root(&rdtgroup_kf_syscall_ops,
KERNFS_ROOT_CREATE_DEACTIVATED |
KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK,
@@ -3677,6 +3813,20 @@ static int __init rdtgroup_setup_root(void)
if (IS_ERR(rdt_root))
return PTR_ERR(rdt_root);
+ctx->kfc.root = rdt_root;
+rdtgroup_default.kn = kernfs_root_to_node(rdt_root);
+return 0;
+}
+static void rdtgroup_destroy_root(void)
+{
+kernfs_destroy_root(rdt_root);
+rdtgroup_default.kn = NULL;
+}
+static void __init rdtgroup_setup_default(void)
+{
mutex_lock(&rdtgroup_mutex);
rdtgroup_default.closid = 0;
@@ -3686,19 +3836,7 @@ static int __init rdtgroup_setup_root(void)
list_add(&rdtgroup_default.rdtgroup_list, &rdt_all_groups);
-ret = rdtgroup_add_files(kernfs_root_to_node(rdt_root), RF_CTRL_BASE);
-if (ret) {
-kernfs_destroy_root(rdt_root);
-goto out;
-}
-rdtgroup_default.kn = kernfs_root_to_node(rdt_root);
-kernfs_activate(rdtgroup_default.kn);
-out:
mutex_unlock(&rdtgroup_mutex);
-return ret;
}
static void domain_destroy_mon_state(struct rdt_domain *d)
@@ -3820,13 +3958,11 @@ int __init rdtgroup_init(void)
seq_buf_init(&last_cmd_status, last_cmd_status_buf,
sizeof(last_cmd_status_buf));
-ret = rdtgroup_setup_root();
-if (ret)
-return ret;
+rdtgroup_setup_default();
ret = sysfs_create_mount_point(fs_kobj, "resctrl");
if (ret)
-goto cleanup_root;
+return ret;
ret = register_filesystem(&rdt_fs_type);
if (ret)
@@ -3859,8 +3995,6 @@ int __init rdtgroup_init(void)
cleanup_mountpoint:
sysfs_remove_mount_point(fs_kobj, "resctrl");
-cleanup_root:
-kernfs_destroy_root(rdt_root);
return ret;
}
@@ -3870,5 +4004,4 @@ void __exit rdtgroup_exit(void)
debugfs_remove_recursive(debugfs_resctrl);
unregister_filesystem(&rdt_fs_type);
sysfs_remove_mount_point(fs_kobj, "resctrl");
-kernfs_destroy_root(rdt_root);
}
@@ -94,7 +94,7 @@ struct rdt_domain {
* zero CBM.
* @shareable_bits: Bitmask of shareable resource with other
* executing entities
-* @arch_has_sparse_bitmaps: True if a bitmap like f00f is valid.
+* @arch_has_sparse_bitmasks: True if a bitmask like f00f is valid.
* @arch_has_per_cpu_cfg: True if QOS_CFG register for this cache
* level has CPU scope.
*/
@@ -102,7 +102,7 @@ struct resctrl_cache {
unsigned int cbm_len;
unsigned int min_cbm_bits;
unsigned int shareable_bits;
-bool arch_has_sparse_bitmaps;
+bool arch_has_sparse_bitmasks;
bool arch_has_per_cpu_cfg;
};
......