x86/mce/amd: Cleanup threshold device remove path
Pass in the bank pointer directly to the cleaning up functions, obviating the need for per-CPU accesses.

Make the clean up path interrupt-safe by cleaning the bank pointer first so that the rest of the teardown happens safe from the thresholding interrupt.

No functional changes.

[ bp: Write commit message and reverse bank->shared test to save an indentation level in threshold_remove_bank(). ]

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lkml.kernel.org/r/20200403161943.1458-7-bp@alien8.de
parent 6458de97fc
commit f26d2580a7
2 changed files with 39 additions and 43 deletions
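The reordering in mce_threshold_remove_device() below is the interrupt-safety point of the patch: the per-CPU threshold_banks pointer is cleared first, and only then are the bank structures torn down, so the thresholding interrupt can no longer reach memory that is about to be freed. What follows is a minimal user-space sketch of that clear-the-published-pointer-first pattern; every name in it (struct bank, published_banks, handle_event, remove_banks) is an illustrative stand-in, not kernel code.

/*
 * Illustrative sketch only: a reader (standing in for the thresholding
 * interrupt) checks a published pointer before touching the data, and
 * the teardown path unpublishes that pointer before freeing anything.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct bank {
	int id;
};

/* Stand-in for the per-CPU threshold_banks pointer. */
static _Atomic(struct bank **) published_banks;

/* Reader path: bail out if teardown has already unpublished the banks. */
static void handle_event(unsigned int bank_nr)
{
	struct bank **bp = atomic_load(&published_banks);

	if (!bp || !bp[bank_nr])
		return;

	printf("handling event on bank %d\n", bp[bank_nr]->id);
}

/* Teardown path: unpublish first, free afterwards. */
static void remove_banks(unsigned int numbanks)
{
	struct bank **bp = atomic_exchange(&published_banks, NULL);
	unsigned int i;

	if (!bp)
		return;

	for (i = 0; i < numbanks; i++)
		free(bp[i]);
	free(bp);
}

int main(void)
{
	struct bank **bp = calloc(2, sizeof(*bp));
	unsigned int i;

	for (i = 0; i < 2; i++) {
		bp[i] = malloc(sizeof(**bp));
		bp[i]->id = i;
	}
	atomic_store(&published_banks, bp);

	handle_event(0);	/* finds the bank */
	remove_banks(2);
	handle_event(0);	/* sees NULL, returns without touching freed memory */
	return 0;
}

In the kernel the teardown runs on the CPU whose banks are being removed, so clearing the pointer with this_cpu_write() before the frees is enough with respect to the local interrupt; the C11 atomics above merely stand in for that guarantee in a portable user-space example.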
arch/x86/include/asm/amd_nb.h
@@ -57,6 +57,7 @@ struct threshold_bank {
 
 	/* initialized to the number of CPUs on the node sharing this bank */
 	refcount_t		cpus;
+	unsigned int		shared;
 };
 
 struct amd_northbridge {

arch/x86/kernel/cpu/mce/amd.c
@@ -1362,6 +1362,7 @@ static int threshold_create_bank(struct threshold_bank **bp, unsigned int cpu,
 	}
 
 	if (is_shared_bank(bank)) {
+		b->shared = 1;
 		refcount_set(&b->cpus, 1);
 
 		/* nb is already initialized, see above */
@@ -1391,21 +1392,16 @@ static void threshold_block_release(struct kobject *kobj)
 	kfree(to_block(kobj));
 }
 
-static void deallocate_threshold_block(unsigned int cpu, unsigned int bank)
+static void deallocate_threshold_blocks(struct threshold_bank *bank)
 {
-	struct threshold_block *pos = NULL;
-	struct threshold_block *tmp = NULL;
-	struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank];
+	struct threshold_block *pos, *tmp;
 
-	if (!head)
-		return;
-
-	list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
+	list_for_each_entry_safe(pos, tmp, &bank->blocks->miscj, miscj) {
 		list_del(&pos->miscj);
 		kobject_put(&pos->kobj);
 	}
 
-	kobject_put(&head->blocks->kobj);
+	kobject_put(&bank->blocks->kobj);
 }
 
 static void __threshold_remove_blocks(struct threshold_bank *b)
@@ -1419,57 +1415,56 @@ static void __threshold_remove_blocks(struct threshold_bank *b)
 		kobject_del(&pos->kobj);
 }
 
-static void threshold_remove_bank(unsigned int cpu, int bank)
+static void threshold_remove_bank(struct threshold_bank *bank)
 {
 	struct amd_northbridge *nb;
-	struct threshold_bank *b;
 
-	b = per_cpu(threshold_banks, cpu)[bank];
-	if (!b)
-		return;
+	if (!bank->blocks)
+		goto out_free;
 
-	if (!b->blocks)
-		goto free_out;
+	if (!bank->shared)
+		goto out_dealloc;
 
-	if (is_shared_bank(bank)) {
-		if (!refcount_dec_and_test(&b->cpus)) {
-			__threshold_remove_blocks(b);
-			per_cpu(threshold_banks, cpu)[bank] = NULL;
-			return;
-		} else {
-			/*
-			 * the last CPU on this node using the shared bank is
-			 * going away, remove that bank now.
-			 */
-			nb = node_to_amd_nb(amd_get_nb_id(cpu));
-			nb->bank4 = NULL;
-		}
+	if (!refcount_dec_and_test(&bank->cpus)) {
+		__threshold_remove_blocks(bank);
+		return;
+	} else {
+		/*
+		 * The last CPU on this node using the shared bank is going
+		 * away, remove that bank now.
+		 */
+		nb = node_to_amd_nb(amd_get_nb_id(smp_processor_id()));
+		nb->bank4 = NULL;
 	}
 
-	deallocate_threshold_block(cpu, bank);
+out_dealloc:
+	deallocate_threshold_blocks(bank);
 
-free_out:
-	kobject_del(b->kobj);
-	kobject_put(b->kobj);
-	kfree(b);
-	per_cpu(threshold_banks, cpu)[bank] = NULL;
+out_free:
+	kobject_put(bank->kobj);
+	kfree(bank);
 }
 
 int mce_threshold_remove_device(unsigned int cpu)
 {
 	struct threshold_bank **bp = this_cpu_read(threshold_banks);
-	unsigned int bank;
+	unsigned int bank, numbanks = this_cpu_read(mce_num_banks);
 
 	if (!bp)
 		return 0;
 
-	for (bank = 0; bank < per_cpu(mce_num_banks, cpu); ++bank) {
-		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
-			continue;
-		threshold_remove_bank(cpu, bank);
-	}
-	/* Clear the pointer before freeing the memory */
+	/*
+	 * Clear the pointer before cleaning up, so that the interrupt won't
+	 * touch anything of this.
+	 */
 	this_cpu_write(threshold_banks, NULL);
+
+	for (bank = 0; bank < numbanks; bank++) {
+		if (bp[bank]) {
+			threshold_remove_bank(bp[bank]);
+			bp[bank] = NULL;
+		}
+	}
 	kfree(bp);
 	return 0;
 }