Commit 723f1a0d authored by Babu Moger's avatar Babu Moger Committed by Borislav Petkov

x86/resctrl: Fixup the user-visible strings

Fix the messages in rdt_last_cmd_printf() and rdt_last_cmd_puts() to
make them more meaningful and consistent.

 [ bp: s/cpu/CPU/; s/mem\W/memory / ]
Signed-off-by: Babu Moger <babu.moger@amd.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Brijesh Singh <brijesh.singh@amd.com>
Cc: "Chang S. Bae" <chang.seok.bae@intel.com>
Cc: David Miller <davem@davemloft.net>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Dmitry Safonov <dima@arista.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jann Horn <jannh@google.com>
Cc: Joerg Roedel <jroedel@suse.de>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Kate Stewart <kstewart@linuxfoundation.org>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: <linux-doc@vger.kernel.org>
Cc: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Philippe Ombredanne <pombredanne@nexb.com>
Cc: Pu Wen <puwen@hygon.cn>
Cc: <qianyue.zj@alibaba-inc.com>
Cc: "Rafael J. Wysocki" <rafael@kernel.org>
Cc: Reinette Chatre <reinette.chatre@intel.com>
Cc: Rian Hunter <rian@alum.mit.edu>
Cc: Sherry Hurwitz <sherry.hurwitz@amd.com>
Cc: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Thomas Lendacky <Thomas.Lendacky@amd.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
Cc: <xiaochen.shen@intel.com>
Link: https://lkml.kernel.org/r/20181121202811.4492-11-babu.moger@amd.com
parent 9f72f855
...@@ -70,7 +70,7 @@ int parse_bw_intel(struct rdt_parse_data *data, struct rdt_resource *r, ...@@ -70,7 +70,7 @@ int parse_bw_intel(struct rdt_parse_data *data, struct rdt_resource *r,
unsigned long bw_val; unsigned long bw_val;
if (d->have_new_ctrl) { if (d->have_new_ctrl) {
rdt_last_cmd_printf("duplicate domain %d\n", d->id); rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
return -EINVAL; return -EINVAL;
} }
...@@ -96,12 +96,12 @@ bool cbm_validate_intel(char *buf, u32 *data, struct rdt_resource *r) ...@@ -96,12 +96,12 @@ bool cbm_validate_intel(char *buf, u32 *data, struct rdt_resource *r)
ret = kstrtoul(buf, 16, &val); ret = kstrtoul(buf, 16, &val);
if (ret) { if (ret) {
rdt_last_cmd_printf("non-hex character in mask %s\n", buf); rdt_last_cmd_printf("Non-hex character in the mask %s\n", buf);
return false; return false;
} }
if (val == 0 || val > r->default_ctrl) { if (val == 0 || val > r->default_ctrl) {
rdt_last_cmd_puts("mask out of range\n"); rdt_last_cmd_puts("Mask out of range\n");
return false; return false;
} }
...@@ -109,12 +109,12 @@ bool cbm_validate_intel(char *buf, u32 *data, struct rdt_resource *r) ...@@ -109,12 +109,12 @@ bool cbm_validate_intel(char *buf, u32 *data, struct rdt_resource *r)
zero_bit = find_next_zero_bit(&val, cbm_len, first_bit); zero_bit = find_next_zero_bit(&val, cbm_len, first_bit);
if (find_next_bit(&val, cbm_len, zero_bit) < cbm_len) { if (find_next_bit(&val, cbm_len, zero_bit) < cbm_len) {
rdt_last_cmd_printf("mask %lx has non-consecutive 1-bits\n", val); rdt_last_cmd_printf("The mask %lx has non-consecutive 1-bits\n", val);
return false; return false;
} }
if ((zero_bit - first_bit) < r->cache.min_cbm_bits) { if ((zero_bit - first_bit) < r->cache.min_cbm_bits) {
rdt_last_cmd_printf("Need at least %d bits in mask\n", rdt_last_cmd_printf("Need at least %d bits in the mask\n",
r->cache.min_cbm_bits); r->cache.min_cbm_bits);
return false; return false;
} }
...@@ -134,7 +134,7 @@ int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r, ...@@ -134,7 +134,7 @@ int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
u32 cbm_val; u32 cbm_val;
if (d->have_new_ctrl) { if (d->have_new_ctrl) {
rdt_last_cmd_printf("duplicate domain %d\n", d->id); rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
return -EINVAL; return -EINVAL;
} }
...@@ -144,7 +144,7 @@ int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r, ...@@ -144,7 +144,7 @@ int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
*/ */
if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP && if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP &&
rdtgroup_pseudo_locked_in_hierarchy(d)) { rdtgroup_pseudo_locked_in_hierarchy(d)) {
rdt_last_cmd_printf("pseudo-locked region in hierarchy\n"); rdt_last_cmd_printf("Pseudo-locked region in hierarchy\n");
return -EINVAL; return -EINVAL;
} }
...@@ -163,14 +163,14 @@ int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r, ...@@ -163,14 +163,14 @@ int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
* either is exclusive. * either is exclusive.
*/ */
if (rdtgroup_cbm_overlaps(r, d, cbm_val, rdtgrp->closid, true)) { if (rdtgroup_cbm_overlaps(r, d, cbm_val, rdtgrp->closid, true)) {
rdt_last_cmd_printf("overlaps with exclusive group\n"); rdt_last_cmd_printf("Overlaps with exclusive group\n");
return -EINVAL; return -EINVAL;
} }
if (rdtgroup_cbm_overlaps(r, d, cbm_val, rdtgrp->closid, false)) { if (rdtgroup_cbm_overlaps(r, d, cbm_val, rdtgrp->closid, false)) {
if (rdtgrp->mode == RDT_MODE_EXCLUSIVE || if (rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
rdt_last_cmd_printf("overlaps with other group\n"); rdt_last_cmd_printf("Overlaps with other group\n");
return -EINVAL; return -EINVAL;
} }
} }
...@@ -292,7 +292,7 @@ static int rdtgroup_parse_resource(char *resname, char *tok, ...@@ -292,7 +292,7 @@ static int rdtgroup_parse_resource(char *resname, char *tok,
if (!strcmp(resname, r->name) && rdtgrp->closid < r->num_closid) if (!strcmp(resname, r->name) && rdtgrp->closid < r->num_closid)
return parse_line(tok, r, rdtgrp); return parse_line(tok, r, rdtgrp);
} }
rdt_last_cmd_printf("unknown/unsupported resource name '%s'\n", resname); rdt_last_cmd_printf("Unknown or unsupported resource name '%s'\n", resname);
return -EINVAL; return -EINVAL;
} }
...@@ -323,7 +323,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of, ...@@ -323,7 +323,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
*/ */
if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
ret = -EINVAL; ret = -EINVAL;
rdt_last_cmd_puts("resource group is pseudo-locked\n"); rdt_last_cmd_puts("Resource group is pseudo-locked\n");
goto out; goto out;
} }
......
...@@ -213,7 +213,7 @@ static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr) ...@@ -213,7 +213,7 @@ static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr)
for_each_cpu(cpu, &plr->d->cpu_mask) { for_each_cpu(cpu, &plr->d->cpu_mask) {
pm_req = kzalloc(sizeof(*pm_req), GFP_KERNEL); pm_req = kzalloc(sizeof(*pm_req), GFP_KERNEL);
if (!pm_req) { if (!pm_req) {
rdt_last_cmd_puts("fail allocating mem for PM QoS\n"); rdt_last_cmd_puts("Failure to allocate memory for PM QoS\n");
ret = -ENOMEM; ret = -ENOMEM;
goto out_err; goto out_err;
} }
...@@ -222,7 +222,7 @@ static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr) ...@@ -222,7 +222,7 @@ static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr)
DEV_PM_QOS_RESUME_LATENCY, DEV_PM_QOS_RESUME_LATENCY,
30); 30);
if (ret < 0) { if (ret < 0) {
rdt_last_cmd_printf("fail to add latency req cpu%d\n", rdt_last_cmd_printf("Failed to add latency req CPU%d\n",
cpu); cpu);
kfree(pm_req); kfree(pm_req);
ret = -1; ret = -1;
...@@ -289,7 +289,7 @@ static int pseudo_lock_region_init(struct pseudo_lock_region *plr) ...@@ -289,7 +289,7 @@ static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
plr->cpu = cpumask_first(&plr->d->cpu_mask); plr->cpu = cpumask_first(&plr->d->cpu_mask);
if (!cpu_online(plr->cpu)) { if (!cpu_online(plr->cpu)) {
rdt_last_cmd_printf("cpu %u associated with cache not online\n", rdt_last_cmd_printf("CPU %u associated with cache not online\n",
plr->cpu); plr->cpu);
ret = -ENODEV; ret = -ENODEV;
goto out_region; goto out_region;
...@@ -307,7 +307,7 @@ static int pseudo_lock_region_init(struct pseudo_lock_region *plr) ...@@ -307,7 +307,7 @@ static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
} }
ret = -1; ret = -1;
rdt_last_cmd_puts("unable to determine cache line size\n"); rdt_last_cmd_puts("Unable to determine cache line size\n");
out_region: out_region:
pseudo_lock_region_clear(plr); pseudo_lock_region_clear(plr);
return ret; return ret;
...@@ -361,14 +361,14 @@ static int pseudo_lock_region_alloc(struct pseudo_lock_region *plr) ...@@ -361,14 +361,14 @@ static int pseudo_lock_region_alloc(struct pseudo_lock_region *plr)
* KMALLOC_MAX_SIZE. * KMALLOC_MAX_SIZE.
*/ */
if (plr->size > KMALLOC_MAX_SIZE) { if (plr->size > KMALLOC_MAX_SIZE) {
rdt_last_cmd_puts("requested region exceeds maximum size\n"); rdt_last_cmd_puts("Requested region exceeds maximum size\n");
ret = -E2BIG; ret = -E2BIG;
goto out_region; goto out_region;
} }
plr->kmem = kzalloc(plr->size, GFP_KERNEL); plr->kmem = kzalloc(plr->size, GFP_KERNEL);
if (!plr->kmem) { if (!plr->kmem) {
rdt_last_cmd_puts("unable to allocate memory\n"); rdt_last_cmd_puts("Unable to allocate memory\n");
ret = -ENOMEM; ret = -ENOMEM;
goto out_region; goto out_region;
} }
...@@ -665,7 +665,7 @@ int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp) ...@@ -665,7 +665,7 @@ int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp)
* default closid associated with it. * default closid associated with it.
*/ */
if (rdtgrp == &rdtgroup_default) { if (rdtgrp == &rdtgroup_default) {
rdt_last_cmd_puts("cannot pseudo-lock default group\n"); rdt_last_cmd_puts("Cannot pseudo-lock default group\n");
return -EINVAL; return -EINVAL;
} }
...@@ -707,17 +707,17 @@ int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp) ...@@ -707,17 +707,17 @@ int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp)
*/ */
prefetch_disable_bits = get_prefetch_disable_bits(); prefetch_disable_bits = get_prefetch_disable_bits();
if (prefetch_disable_bits == 0) { if (prefetch_disable_bits == 0) {
rdt_last_cmd_puts("pseudo-locking not supported\n"); rdt_last_cmd_puts("Pseudo-locking not supported\n");
return -EINVAL; return -EINVAL;
} }
if (rdtgroup_monitor_in_progress(rdtgrp)) { if (rdtgroup_monitor_in_progress(rdtgrp)) {
rdt_last_cmd_puts("monitoring in progress\n"); rdt_last_cmd_puts("Monitoring in progress\n");
return -EINVAL; return -EINVAL;
} }
if (rdtgroup_tasks_assigned(rdtgrp)) { if (rdtgroup_tasks_assigned(rdtgrp)) {
rdt_last_cmd_puts("tasks assigned to resource group\n"); rdt_last_cmd_puts("Tasks assigned to resource group\n");
return -EINVAL; return -EINVAL;
} }
...@@ -727,13 +727,13 @@ int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp) ...@@ -727,13 +727,13 @@ int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp)
} }
if (rdtgroup_locksetup_user_restrict(rdtgrp)) { if (rdtgroup_locksetup_user_restrict(rdtgrp)) {
rdt_last_cmd_puts("unable to modify resctrl permissions\n"); rdt_last_cmd_puts("Unable to modify resctrl permissions\n");
return -EIO; return -EIO;
} }
ret = pseudo_lock_init(rdtgrp); ret = pseudo_lock_init(rdtgrp);
if (ret) { if (ret) {
rdt_last_cmd_puts("unable to init pseudo-lock region\n"); rdt_last_cmd_puts("Unable to init pseudo-lock region\n");
goto out_release; goto out_release;
} }
...@@ -770,7 +770,7 @@ int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp) ...@@ -770,7 +770,7 @@ int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp)
if (rdt_mon_capable) { if (rdt_mon_capable) {
ret = alloc_rmid(); ret = alloc_rmid();
if (ret < 0) { if (ret < 0) {
rdt_last_cmd_puts("out of RMIDs\n"); rdt_last_cmd_puts("Out of RMIDs\n");
return ret; return ret;
} }
rdtgrp->mon.rmid = ret; rdtgrp->mon.rmid = ret;
...@@ -1304,7 +1304,7 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp) ...@@ -1304,7 +1304,7 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
"pseudo_lock/%u", plr->cpu); "pseudo_lock/%u", plr->cpu);
if (IS_ERR(thread)) { if (IS_ERR(thread)) {
ret = PTR_ERR(thread); ret = PTR_ERR(thread);
rdt_last_cmd_printf("locking thread returned error %d\n", ret); rdt_last_cmd_printf("Locking thread returned error %d\n", ret);
goto out_cstates; goto out_cstates;
} }
...@@ -1322,13 +1322,13 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp) ...@@ -1322,13 +1322,13 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
* the cleared, but not freed, plr struct resulting in an * the cleared, but not freed, plr struct resulting in an
* empty pseudo-locking loop. * empty pseudo-locking loop.
*/ */
rdt_last_cmd_puts("locking thread interrupted\n"); rdt_last_cmd_puts("Locking thread interrupted\n");
goto out_cstates; goto out_cstates;
} }
ret = pseudo_lock_minor_get(&new_minor); ret = pseudo_lock_minor_get(&new_minor);
if (ret < 0) { if (ret < 0) {
rdt_last_cmd_puts("unable to obtain a new minor number\n"); rdt_last_cmd_puts("Unable to obtain a new minor number\n");
goto out_cstates; goto out_cstates;
} }
...@@ -1360,7 +1360,7 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp) ...@@ -1360,7 +1360,7 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
if (IS_ERR(dev)) { if (IS_ERR(dev)) {
ret = PTR_ERR(dev); ret = PTR_ERR(dev);
rdt_last_cmd_printf("failed to create character device: %d\n", rdt_last_cmd_printf("Failed to create character device: %d\n",
ret); ret);
goto out_debugfs; goto out_debugfs;
} }
......
...@@ -345,7 +345,7 @@ static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask, ...@@ -345,7 +345,7 @@ static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
/* Check whether cpus belong to parent ctrl group */ /* Check whether cpus belong to parent ctrl group */
cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask); cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask);
if (cpumask_weight(tmpmask)) { if (cpumask_weight(tmpmask)) {
rdt_last_cmd_puts("can only add CPUs to mongroup that belong to parent\n"); rdt_last_cmd_puts("Can only add CPUs to mongroup that belong to parent\n");
return -EINVAL; return -EINVAL;
} }
...@@ -470,14 +470,14 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of, ...@@ -470,14 +470,14 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
rdt_last_cmd_clear(); rdt_last_cmd_clear();
if (!rdtgrp) { if (!rdtgrp) {
ret = -ENOENT; ret = -ENOENT;
rdt_last_cmd_puts("directory was removed\n"); rdt_last_cmd_puts("Directory was removed\n");
goto unlock; goto unlock;
} }
if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED || if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
ret = -EINVAL; ret = -EINVAL;
rdt_last_cmd_puts("pseudo-locking in progress\n"); rdt_last_cmd_puts("Pseudo-locking in progress\n");
goto unlock; goto unlock;
} }
...@@ -487,7 +487,7 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of, ...@@ -487,7 +487,7 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
ret = cpumask_parse(buf, newmask); ret = cpumask_parse(buf, newmask);
if (ret) { if (ret) {
rdt_last_cmd_puts("bad cpu list/mask\n"); rdt_last_cmd_puts("Bad CPU list/mask\n");
goto unlock; goto unlock;
} }
...@@ -495,7 +495,7 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of, ...@@ -495,7 +495,7 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
cpumask_andnot(tmpmask, newmask, cpu_online_mask); cpumask_andnot(tmpmask, newmask, cpu_online_mask);
if (cpumask_weight(tmpmask)) { if (cpumask_weight(tmpmask)) {
ret = -EINVAL; ret = -EINVAL;
rdt_last_cmd_puts("can only assign online cpus\n"); rdt_last_cmd_puts("Can only assign online CPUs\n");
goto unlock; goto unlock;
} }
...@@ -574,7 +574,7 @@ static int __rdtgroup_move_task(struct task_struct *tsk, ...@@ -574,7 +574,7 @@ static int __rdtgroup_move_task(struct task_struct *tsk,
*/ */
atomic_dec(&rdtgrp->waitcount); atomic_dec(&rdtgrp->waitcount);
kfree(callback); kfree(callback);
rdt_last_cmd_puts("task exited\n"); rdt_last_cmd_puts("Task exited\n");
} else { } else {
/* /*
* For ctrl_mon groups move both closid and rmid. * For ctrl_mon groups move both closid and rmid.
...@@ -692,7 +692,7 @@ static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of, ...@@ -692,7 +692,7 @@ static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of,
if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED || if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
ret = -EINVAL; ret = -EINVAL;
rdt_last_cmd_puts("pseudo-locking in progress\n"); rdt_last_cmd_puts("Pseudo-locking in progress\n");
goto unlock; goto unlock;
} }
...@@ -1158,14 +1158,14 @@ static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp) ...@@ -1158,14 +1158,14 @@ static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp)
list_for_each_entry(d, &r->domains, list) { list_for_each_entry(d, &r->domains, list) {
if (rdtgroup_cbm_overlaps(r, d, d->ctrl_val[closid], if (rdtgroup_cbm_overlaps(r, d, d->ctrl_val[closid],
rdtgrp->closid, false)) { rdtgrp->closid, false)) {
rdt_last_cmd_puts("schemata overlaps\n"); rdt_last_cmd_puts("Schemata overlaps\n");
return false; return false;
} }
} }
} }
if (!has_cache) { if (!has_cache) {
rdt_last_cmd_puts("cannot be exclusive without CAT/CDP\n"); rdt_last_cmd_puts("Cannot be exclusive without CAT/CDP\n");
return false; return false;
} }
...@@ -1206,7 +1206,7 @@ static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of, ...@@ -1206,7 +1206,7 @@ static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of,
goto out; goto out;
if (mode == RDT_MODE_PSEUDO_LOCKED) { if (mode == RDT_MODE_PSEUDO_LOCKED) {
rdt_last_cmd_printf("cannot change pseudo-locked group\n"); rdt_last_cmd_printf("Cannot change pseudo-locked group\n");
ret = -EINVAL; ret = -EINVAL;
goto out; goto out;
} }
...@@ -1235,7 +1235,7 @@ static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of, ...@@ -1235,7 +1235,7 @@ static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of,
goto out; goto out;
rdtgrp->mode = RDT_MODE_PSEUDO_LOCKSETUP; rdtgrp->mode = RDT_MODE_PSEUDO_LOCKSETUP;
} else { } else {
rdt_last_cmd_printf("unknown/unsupported mode\n"); rdt_last_cmd_printf("Unknown or unsupported mode\n");
ret = -EINVAL; ret = -EINVAL;
} }
...@@ -2543,7 +2543,7 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp) ...@@ -2543,7 +2543,7 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
tmp_cbm = d->new_ctrl; tmp_cbm = d->new_ctrl;
if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) <
r->cache.min_cbm_bits) { r->cache.min_cbm_bits) {
rdt_last_cmd_printf("no space on %s:%d\n", rdt_last_cmd_printf("No space on %s:%d\n",
r->name, d->id); r->name, d->id);
return -ENOSPC; return -ENOSPC;
} }
...@@ -2560,7 +2560,7 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp) ...@@ -2560,7 +2560,7 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
continue; continue;
ret = update_domains(r, rdtgrp->closid); ret = update_domains(r, rdtgrp->closid);
if (ret < 0) { if (ret < 0) {
rdt_last_cmd_puts("failed to initialize allocations\n"); rdt_last_cmd_puts("Failed to initialize allocations\n");
return ret; return ret;
} }
rdtgrp->mode = RDT_MODE_SHAREABLE; rdtgrp->mode = RDT_MODE_SHAREABLE;
...@@ -2583,7 +2583,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, ...@@ -2583,7 +2583,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
rdt_last_cmd_clear(); rdt_last_cmd_clear();
if (!prdtgrp) { if (!prdtgrp) {
ret = -ENODEV; ret = -ENODEV;
rdt_last_cmd_puts("directory was removed\n"); rdt_last_cmd_puts("Directory was removed\n");
goto out_unlock; goto out_unlock;
} }
...@@ -2591,7 +2591,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, ...@@ -2591,7 +2591,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
(prdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || (prdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
prdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)) { prdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)) {
ret = -EINVAL; ret = -EINVAL;
rdt_last_cmd_puts("pseudo-locking in progress\n"); rdt_last_cmd_puts("Pseudo-locking in progress\n");
goto out_unlock; goto out_unlock;
} }
...@@ -2599,7 +2599,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, ...@@ -2599,7 +2599,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL); rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL);
if (!rdtgrp) { if (!rdtgrp) {
ret = -ENOSPC; ret = -ENOSPC;
rdt_last_cmd_puts("kernel out of memory\n"); rdt_last_cmd_puts("Kernel out of memory\n");
goto out_unlock; goto out_unlock;
} }
*r = rdtgrp; *r = rdtgrp;
...@@ -2640,7 +2640,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, ...@@ -2640,7 +2640,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
if (rdt_mon_capable) { if (rdt_mon_capable) {
ret = alloc_rmid(); ret = alloc_rmid();
if (ret < 0) { if (ret < 0) {
rdt_last_cmd_puts("out of RMIDs\n"); rdt_last_cmd_puts("Out of RMIDs\n");
goto out_destroy; goto out_destroy;
} }
rdtgrp->mon.rmid = ret; rdtgrp->mon.rmid = ret;
...@@ -2728,7 +2728,7 @@ static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn, ...@@ -2728,7 +2728,7 @@ static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
kn = rdtgrp->kn; kn = rdtgrp->kn;
ret = closid_alloc(); ret = closid_alloc();
if (ret < 0) { if (ret < 0) {
rdt_last_cmd_puts("out of CLOSIDs\n"); rdt_last_cmd_puts("Out of CLOSIDs\n");
goto out_common_fail; goto out_common_fail;
} }
closid = ret; closid = ret;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment