Commit f26d2580 authored by Thomas Gleixner, committed by Borislav Petkov

x86/mce/amd: Cleanup threshold device remove path

Pass in the bank pointer directly to the cleanup functions, obviating
the need for per-CPU accesses. Make the cleanup path interrupt-safe by
clearing the bank pointer first so that the rest of the teardown runs
shielded from the thresholding interrupt.

No functional changes.

 [ bp: Write commit message and reverse bank->shared test to save an
   indentation level in threshold_remove_bank(). ]
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lkml.kernel.org/r/20200403161943.1458-7-bp@alien8.de
parent 6458de97
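
A minimal, self-contained C sketch of the ordering the commit message describes, for illustration only: the published pointer is cleared before the rest of the teardown runs, so a handler invoked in between sees NULL and backs off. All names below (published, handler, remove_device, struct bank) are invented for the example and are not the kernel code; the kernel additionally relies on the thresholding interrupt running on the same CPU as the teardown, which is why no extra locking is needed there.

#include <stdio.h>
#include <stdlib.h>

struct bank {
	int id;
};

/* stand-in for the per-CPU threshold_banks pointer (one bank for brevity) */
static struct bank **published;

/* what the thresholding interrupt would do: bail out if nothing is published */
static void handler(void)
{
	struct bank **bp = published;

	if (!bp) {
		puts("handler: no banks published, backing off");
		return;
	}
	printf("handler: touching bank with id %d\n", bp[0]->id);
}

/* teardown in the order the commit establishes */
static void remove_device(void)
{
	struct bank **bp = published;

	if (!bp)
		return;

	/* 1) unpublish first: any later handler run sees NULL */
	published = NULL;

	/* 2) only now free the memory, so the handler never sees it half torn down */
	free(bp[0]);
	free(bp);
}

int main(void)
{
	published = calloc(1, sizeof(*published));
	published[0] = malloc(sizeof(*published[0]));
	published[0]->id = 4;

	handler();		/* sees the bank */
	remove_device();
	handler();		/* sees NULL and backs off */
	return 0;
}

Built with any C compiler, the sketch prints the bank id once and then reports that nothing is published after remove_device() has run.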
@@ -57,6 +57,7 @@ struct threshold_bank {
 
 	/* initialized to the number of CPUs on the node sharing this bank */
 	refcount_t		cpus;
+	unsigned int		shared;
 };
 
 struct amd_northbridge {
...
@@ -1362,6 +1362,7 @@ static int threshold_create_bank(struct threshold_bank **bp, unsigned int cpu,
 	}
 
 	if (is_shared_bank(bank)) {
+		b->shared = 1;
 		refcount_set(&b->cpus, 1);
 
 		/* nb is already initialized, see above */
@@ -1391,21 +1392,16 @@ static void threshold_block_release(struct kobject *kobj)
 	kfree(to_block(kobj));
 }
 
-static void deallocate_threshold_block(unsigned int cpu, unsigned int bank)
+static void deallocate_threshold_blocks(struct threshold_bank *bank)
 {
-	struct threshold_block *pos = NULL;
-	struct threshold_block *tmp = NULL;
-	struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank];
-
-	if (!head)
-		return;
+	struct threshold_block *pos, *tmp;
 
-	list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
+	list_for_each_entry_safe(pos, tmp, &bank->blocks->miscj, miscj) {
 		list_del(&pos->miscj);
 		kobject_put(&pos->kobj);
 	}
 
-	kobject_put(&head->blocks->kobj);
+	kobject_put(&bank->blocks->kobj);
 }
 
 static void __threshold_remove_blocks(struct threshold_bank *b)
@@ -1419,57 +1415,56 @@ static void __threshold_remove_blocks(struct threshold_bank *b)
 		kobject_del(&pos->kobj);
 }
 
-static void threshold_remove_bank(unsigned int cpu, int bank)
+static void threshold_remove_bank(struct threshold_bank *bank)
 {
 	struct amd_northbridge *nb;
-	struct threshold_bank *b;
 
-	b = per_cpu(threshold_banks, cpu)[bank];
-	if (!b)
-		return;
+	if (!bank->blocks)
+		goto out_free;
 
-	if (!b->blocks)
-		goto free_out;
+	if (!bank->shared)
+		goto out_dealloc;
 
-	if (is_shared_bank(bank)) {
-		if (!refcount_dec_and_test(&b->cpus)) {
-			__threshold_remove_blocks(b);
-			per_cpu(threshold_banks, cpu)[bank] = NULL;
-			return;
-		} else {
-			/*
-			 * the last CPU on this node using the shared bank is
-			 * going away, remove that bank now.
-			 */
-			nb = node_to_amd_nb(amd_get_nb_id(cpu));
-			nb->bank4 = NULL;
-		}
+	if (!refcount_dec_and_test(&bank->cpus)) {
+		__threshold_remove_blocks(bank);
+		return;
+	} else {
+		/*
+		 * The last CPU on this node using the shared bank is going
+		 * away, remove that bank now.
+		 */
+		nb = node_to_amd_nb(amd_get_nb_id(smp_processor_id()));
+		nb->bank4 = NULL;
 	}
 
-	deallocate_threshold_block(cpu, bank);
+out_dealloc:
+	deallocate_threshold_blocks(bank);
 
-free_out:
-	kobject_del(b->kobj);
-	kobject_put(b->kobj);
-	kfree(b);
-	per_cpu(threshold_banks, cpu)[bank] = NULL;
+out_free:
+	kobject_put(bank->kobj);
+	kfree(bank);
 }
 
 int mce_threshold_remove_device(unsigned int cpu)
 {
 	struct threshold_bank **bp = this_cpu_read(threshold_banks);
-	unsigned int bank;
+	unsigned int bank, numbanks = this_cpu_read(mce_num_banks);
 
 	if (!bp)
 		return 0;
 
-	for (bank = 0; bank < per_cpu(mce_num_banks, cpu); ++bank) {
-		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
-			continue;
-		threshold_remove_bank(cpu, bank);
-	}
-	/* Clear the pointer before freeing the memory */
+	/*
+	 * Clear the pointer before cleaning up, so that the interrupt won't
+	 * touch anything of this.
+	 */
 	this_cpu_write(threshold_banks, NULL);
+
+	for (bank = 0; bank < numbanks; bank++) {
+		if (bp[bank]) {
+			threshold_remove_bank(bp[bank]);
+			bp[bank] = NULL;
+		}
+	}
 	kfree(bp);
 	return 0;
 }
...