Commit 97efd283 authored by Linus Torvalds

Merge tag 'x86-cleanups-2023-08-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull misc x86 cleanups from Ingo Molnar:
 "The following commit deserves special mention:

   22dc02f8 Revert "sched/fair: Move unused stub functions to header"

  This is in x86/cleanups, because the revert is a re-application of a
  number of cleanups that got removed inadvertently"

[ This also effectively undoes the amd_check_microcode() microcode
  declaration change I had done in my microcode loader merge in commit
  42a7f6e3 ("Merge tag 'x86_microcode_for_v6.6_rc1' [...]").

  I picked the declaration change by Arnd from this branch instead,
  which put it in <asm/processor.h> instead of <asm/microcode.h> like I
  had done in my merge resolution   - Linus ]

* tag 'x86-cleanups-2023-08-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/platform/uv: Refactor code using deprecated strncpy() interface to use strscpy()
  x86/hpet: Refactor code using deprecated strncpy() interface to use strscpy()
  x86/platform/uv: Refactor code using deprecated strcpy()/strncpy() interfaces to use strscpy()
  x86/qspinlock-paravirt: Fix missing-prototype warning
  x86/paravirt: Silence unused native_pv_lock_init() function warning
  x86/alternative: Add a __alt_reloc_selftest() prototype
  x86/purgatory: Include header for warn() declaration
  x86/asm: Avoid unneeded __div64_32 function definition
  Revert "sched/fair: Move unused stub functions to header"
  x86/apic: Hide unused safe_smp_processor_id() on 32-bit UP
  x86/cpu: Fix amd_check_microcode() declaration
parents 3ca9a836 212f07a2
@@ -7,7 +7,7 @@
 #include "misc.h"
 #include "error.h"
-void warn(char *m)
+void warn(const char *m)
 {
 	error_putstr("\n\n");
 	error_putstr(m);
......
@@ -4,7 +4,7 @@
 #include <linux/compiler.h>
-void warn(char *m);
+void warn(const char *m);
 void error(char *m) __noreturn;
 void panic(const char *fmt, ...) __noreturn __cold;
......
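
Constifying the parameter matters because the warn() declaration in error.h is shared with other definitions and callers (the purgatory hunk further down includes this header), so declaration and definition have to agree exactly, and a const char * parameter accepts string literals and read-only buffers without casts. A standalone illustration of the calling convention (hypothetical names, not the kernel code):

    #include <stdio.h>

    /* A 'const char *' parameter accepts literals and const data alike,
     * and the same prototype can be shared by every definition. */
    static void warn_demo(const char *m)
    {
            fprintf(stderr, "\n\n%s", m);
    }

    int main(void)
    {
            const char *saved = "malformed boot params";

            warn_demo("out of memory");   /* string literal */
            warn_demo(saved);             /* read-only buffer, no cast needed */
            return 0;
    }
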
@@ -71,6 +71,12 @@ static inline u64 mul_u32_u32(u32 a, u32 b)
 }
 #define mul_u32_u32 mul_u32_u32
+/*
+ * __div64_32() is never called on x86, so prevent the
+ * generic definition from getting built.
+ */
+#define __div64_32
 #else
 # include <asm-generic/div64.h>
......
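
The #define above works because the generic divide helper is only compiled when the architecture has not claimed the name: the library side wraps its fallback in an #ifndef on the same identifier, so defining it (even as an empty macro, as here) compiles the redundant generic function out. A self-contained sketch of that guard pattern (hypothetical names, assuming the generic side uses an #ifndef check the way the generic div64 code does):

    #include <stdio.h>

    /* "arch" side: supply a fast helper and define the name so the
     * generic fallback below is never built. */
    #define my_div_helper my_div_helper
    static inline unsigned int my_div_helper(unsigned long long *n, unsigned int base)
    {
            unsigned int rem = (unsigned int)(*n % base);

            *n /= base;
            return rem;
    }

    /* "generic library" side: only built when no override exists. */
    #ifndef my_div_helper
    unsigned int my_div_helper(unsigned long long *n, unsigned int base)
    {
            /* slow generic fallback would live here */
    }
    #endif

    int main(void)
    {
            unsigned long long n = 1000000007ULL;
            unsigned int rem = my_div_helper(&n, 10);   /* picks the "arch" version */

            printf("%llu remainder %u\n", n, rem);      /* 100000000 remainder 7 */
            return 0;
    }
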
@@ -75,10 +75,4 @@ void show_ucode_info_early(void);
 static inline void show_ucode_info_early(void) { }
 #endif /* !CONFIG_CPU_SUP_INTEL */
-#ifdef CONFIG_CPU_SUP_AMD
-void amd_check_microcode(void);
-#else /* CONFIG_CPU_SUP_AMD */
-static inline void amd_check_microcode(void) {}
-#endif
 #endif /* _ASM_X86_MICROCODE_H */
@@ -739,6 +739,7 @@ static __always_inline unsigned long arch_local_irq_save(void)
 	".popsection")
 extern void default_banner(void);
+void native_pv_lock_init(void) __init;
 #else /* __ASSEMBLY__ */
@@ -778,6 +779,12 @@ extern void default_banner(void);
 #endif /* __ASSEMBLY__ */
 #else /* CONFIG_PARAVIRT */
 # define default_banner x86_init_noop
+#ifndef __ASSEMBLY__
+static inline void native_pv_lock_init(void)
+{
+}
+#endif
 #endif /* !CONFIG_PARAVIRT */
 #ifndef __ASSEMBLY__
......
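
Giving native_pv_lock_init() a static inline no-op for !CONFIG_PARAVIRT builds is the standard stub pattern: common code can call the function unconditionally, and the compiler discards the call on kernels built without the feature, with no #ifdef at the call site. A compile-time sketch of the pattern (hypothetical names):

    #include <stdio.h>

    /* Flip this to 0 to simulate a kernel built without the feature. */
    #define CONFIG_FEATURE_X 1

    #if CONFIG_FEATURE_X
    /* Real implementation, normally in a .c file of the subsystem. */
    void feature_x_init(void)
    {
            puts("feature X initialised");
    }
    #else
    /* Header stub: callers still compile and the call folds away entirely. */
    static inline void feature_x_init(void) { }
    #endif

    int main(void)
    {
            /* Common code never needs to know whether the feature exists. */
            feature_x_init();
            return 0;
    }
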
@@ -678,11 +678,13 @@ extern u32 amd_get_nodes_per_socket(void);
 extern u32 amd_get_highest_perf(void);
 extern bool cpu_has_ibpb_brtype_microcode(void);
 extern void amd_clear_divider(void);
+extern void amd_check_microcode(void);
 #else
 static inline u32 amd_get_nodes_per_socket(void) { return 0; }
 static inline u32 amd_get_highest_perf(void) { return 0; }
 static inline bool cpu_has_ibpb_brtype_microcode(void) { return false; }
 static inline void amd_clear_divider(void) { }
+static inline void amd_check_microcode(void) { }
 #endif
 extern unsigned long arch_align_stack(unsigned long sp);
......
@@ -74,8 +74,6 @@ static inline bool vcpu_is_preempted(long cpu)
  */
 DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);
-void native_pv_lock_init(void) __init;
 /*
  * Shortcut for the queued_spin_lock_slowpath() function that allows
  * virt to hijack it.
@@ -103,10 +101,7 @@ static inline bool virt_spin_lock(struct qspinlock *lock)
 	return true;
 }
 #else
-static inline void native_pv_lock_init(void)
-{
-}
 #endif /* CONFIG_PARAVIRT */
 #include <asm-generic/qspinlock.h>
......
@@ -4,6 +4,8 @@
 #include <asm/ibt.h>
+void __lockfunc __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked);
 /*
  * For x86-64, PV_CALLEE_SAVE_REGS_THUNK() saves and restores 8 64-bit
  * registers. For i386, however, only 1 32-bit register needs to be saved
......
@@ -1527,6 +1527,7 @@ static noinline void __init int3_selftest(void)
 static __initdata int __alt_reloc_selftest_addr;
+extern void __init __alt_reloc_selftest(void *arg);
 __visible noinline void __init __alt_reloc_selftest(void *arg)
 {
 	WARN_ON(arg != &__alt_reloc_selftest_addr);
......
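
This hunk and the qspinlock-paravirt one above address the same class of warning: a function with external linkage that is only referenced from assembly or from a macro-generated thunk still needs a prototype in scope before its definition, otherwise -Wmissing-prototypes (W=1 builds) complains even though the code is correct. A minimal sketch of the rule (hypothetical names):

    #include <stdio.h>

    /* Without this prototype, -Wmissing-prototypes warns at the definition
     * below, even though the function is deliberately global (for example
     * because it is reached from assembly or from a generated thunk). */
    void selftest_entry(void *arg);

    void selftest_entry(void *arg)
    {
            printf("selftest entered with %p\n", arg);
    }

    int main(void)
    {
            int dummy;

            selftest_entry(&dummy);
            return 0;
    }
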
@@ -301,6 +301,7 @@ void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
 	local_irq_restore(flags);
 }
+#ifdef CONFIG_SMP
 /* must come after the send_IPI functions above for inlining */
 static int convert_apicid_to_cpu(int apic_id)
 {
@@ -329,3 +330,4 @@ int safe_smp_processor_id(void)
 	return cpuid >= 0 ? cpuid : 0;
 }
+#endif
 #endif
@@ -294,8 +294,7 @@ static void __init early_get_apic_socketid_shift(void)
 static void __init uv_stringify(int len, char *to, char *from)
 {
-	/* Relies on 'to' being NULL chars so result will be NULL terminated */
-	strncpy(to, from, len-1);
+	strscpy(to, from, len);
 	/* Trim trailing spaces */
 	(void)strim(to);
@@ -1013,7 +1012,7 @@ static void __init calc_mmioh_map(enum mmioh_arch index,
 	/* One (UV2) mapping */
 	if (index == UV2_MMIOH) {
-		strncpy(id, "MMIOH", sizeof(id));
+		strscpy(id, "MMIOH", sizeof(id));
 		max_io = max_pnode;
 		mapped = 0;
 		goto map_exit;
......
@@ -421,7 +421,7 @@ static void __init hpet_legacy_clockevent_register(struct hpet_channel *hc)
 	 * the IO_APIC has been initialized.
 	 */
 	hc->cpu = boot_cpu_data.cpu_index;
-	strncpy(hc->name, "hpet", sizeof(hc->name));
+	strscpy(hc->name, "hpet", sizeof(hc->name));
 	hpet_init_clockevent(hc, 50);
 	hc->evt.tick_resume = hpet_clkevt_legacy_resume;
......
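
strscpy() fixes the two well-known strncpy() pitfalls seen in the hunks above: strncpy() does not NUL-terminate when the source fills the buffer, and it zero-pads the rest of the destination on every call, whereas strscpy() always terminates (for a non-zero size) and reports truncation as -E2BIG instead of clipping silently. strscpy() is kernel API rather than libc, so the sketch below only approximates its terminating behaviour in userspace (hypothetical helper, not the kernel implementation):

    #include <stdio.h>
    #include <string.h>

    #define E2BIG_ERR (-7)  /* stand-in for the kernel's -E2BIG */

    /* Userspace approximation of strscpy(): copy at most size-1 bytes,
     * always NUL-terminate, return the copied length or an error on
     * truncation. The real kernel helper is more careful and faster. */
    static long strscpy_like(char *dst, const char *src, size_t size)
    {
            size_t len;

            if (size == 0)
                    return E2BIG_ERR;
            len = strnlen(src, size);
            if (len == size) {              /* source does not fit */
                    memcpy(dst, src, size - 1);
                    dst[size - 1] = '\0';
                    return E2BIG_ERR;
            }
            memcpy(dst, src, len + 1);      /* includes the terminator */
            return (long)len;
    }

    int main(void)
    {
            char buf[5];

            /* strncpy() would leave buf without a terminator here ... */
            strncpy(buf, "hpet0", sizeof(buf));
            /* ... while the strscpy()-style copy truncates and terminates. */
            printf("%ld -> '%s'\n", strscpy_like(buf, "hpet0", sizeof(buf)), buf);
            return 0;
    }
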
@@ -75,7 +75,8 @@ DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);
 void __init native_pv_lock_init(void)
 {
-	if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
+	if (IS_ENABLED(CONFIG_PARAVIRT_SPINLOCKS) &&
+	    !boot_cpu_has(X86_FEATURE_HYPERVISOR))
 		static_branch_disable(&virt_spin_lock_key);
 }
......
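
IS_ENABLED(CONFIG_PARAVIRT_SPINLOCKS) expands to a constant 0 or 1, so on configurations without paravirt spinlocks the condition is constant-false and the branch is discarded, yet the code is still parsed and type-checked in every build, which an #ifdef would not give you. A userspace sketch of the idiom, using a simplified copy of the kconfig.h macro chain (CONFIG_DEMO_FEATURE is made up for the example):

    #include <stdio.h>

    /* Simplified IS_ENABLED(): CONFIG_FOO is either "#define CONFIG_FOO 1"
     * or not defined at all, and the macro chain turns that into a
     * constant 1 or 0 usable in a normal C expression. */
    #define __ARG_PLACEHOLDER_1 0,
    #define __take_second_arg(__ignored, val, ...) val
    #define __is_defined(x)               ___is_defined(x)
    #define ___is_defined(val)            ____is_defined(__ARG_PLACEHOLDER_##val)
    #define ____is_defined(arg1_or_junk)  __take_second_arg(arg1_or_junk 1, 0)
    #define IS_ENABLED(option)            __is_defined(option)

    #define CONFIG_DEMO_FEATURE 1  /* comment this out to see the branch vanish */

    static int hypervisor_present(void)
    {
            return 0;              /* pretend we are on bare metal */
    }

    int main(void)
    {
            /* Constant-folded: with the option off, the call is removed. */
            if (IS_ENABLED(CONFIG_DEMO_FEATURE) && !hypervisor_present())
                    puts("fast path: static key would be disabled here");
            return 0;
    }
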
@@ -202,21 +202,17 @@ static int param_set_action(const char *val, const struct kernel_param *kp)
 {
 	int i;
 	int n = ARRAY_SIZE(valid_acts);
-	char arg[ACTION_LEN], *p;
+	char arg[ACTION_LEN];
 	/* (remove possible '\n') */
-	strncpy(arg, val, ACTION_LEN - 1);
-	arg[ACTION_LEN - 1] = '\0';
-	p = strchr(arg, '\n');
-	if (p)
-		*p = '\0';
+	strscpy(arg, val, strnchrnul(val, sizeof(arg)-1, '\n') - val + 1);
 	for (i = 0; i < n; i++)
 		if (!strcmp(arg, valid_acts[i].action))
 			break;
 	if (i < n) {
-		strcpy(uv_nmi_action, arg);
+		strscpy(uv_nmi_action, arg, sizeof(uv_nmi_action));
 		pr_info("UV: New NMI action:%s\n", uv_nmi_action);
 		return 0;
 	}
@@ -959,7 +955,7 @@ static int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
 		/* Unexpected return, revert action to "dump" */
 		if (master)
-			strncpy(uv_nmi_action, "dump", strlen(uv_nmi_action));
+			strscpy(uv_nmi_action, "dump", sizeof(uv_nmi_action));
 	}
 	/* Pause as all CPU's enter the NMI handler */
......
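
The rewritten param_set_action() collapses the old copy-then-trim sequence into one call: strnchrnul() returns a pointer to the first '\n' within the first sizeof(arg)-1 bytes of the input (or the end of that window), so passing "newline offset + 1" as the strscpy() size puts the terminating NUL exactly where the newline was, while never copying more than ACTION_LEN-1 characters. The same effect in plain C, since strnchrnul() and strscpy() are kernel-only helpers (standalone sketch):

    #include <stdio.h>
    #include <string.h>

    #define ACTION_LEN 16

    int main(void)
    {
            const char *val = "kdump\n";          /* sysfs writes often end in '\n' */
            char arg[ACTION_LEN];
            size_t len;

            /* Stop at '\n' or at the buffer limit, whichever comes first,
             * then terminate; this mirrors the strnchrnul()+strscpy() combo. */
            len = strcspn(val, "\n");
            if (len > sizeof(arg) - 1)
                    len = sizeof(arg) - 1;
            memcpy(arg, val, len);
            arg[len] = '\0';

            printf("parsed action: '%s'\n", arg); /* prints: parsed action: 'kdump' */
            return 0;
    }
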
@@ -14,6 +14,7 @@
 #include <crypto/sha2.h>
 #include <asm/purgatory.h>
+#include "../boot/compressed/error.h"
 #include "../boot/string.h"
 u8 purgatory_sha256_digest[SHA256_DIGEST_SIZE] __section(".kexec-purgatory");
......
@@ -485,6 +485,16 @@ pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
 	return (u32)(atomic_read(&lock->val) | _Q_LOCKED_VAL);
 }
+/*
+ * Include the architecture specific callee-save thunk of the
+ * __pv_queued_spin_unlock(). This thunk is put together with
+ * __pv_queued_spin_unlock() to make the callee-save thunk and the real unlock
+ * function close to each other sharing consecutive instruction cachelines.
+ * Alternatively, architecture specific version of __pv_queued_spin_unlock()
+ * can be defined.
+ */
+#include <asm/qspinlock_paravirt.h>
 /*
  * PV versions of the unlock fastpath and slowpath functions to be used
  * instead of queued_spin_unlock().
@@ -533,16 +543,6 @@ __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
 	pv_kick(node->cpu);
 }
-/*
- * Include the architecture specific callee-save thunk of the
- * __pv_queued_spin_unlock(). This thunk is put together with
- * __pv_queued_spin_unlock() to make the callee-save thunk and the real unlock
- * function close to each other sharing consecutive instruction cachelines.
- * Alternatively, architecture specific version of __pv_queued_spin_unlock()
- * can be defined.
- */
-#include <asm/qspinlock_paravirt.h>
 #ifndef __pv_queued_spin_unlock
 __visible __lockfunc void __pv_queued_spin_unlock(struct qspinlock *lock)
 {
......
@@ -943,7 +943,7 @@ struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
 /**************************************************************
  * Scheduling class statistics methods:
  */
+#ifdef CONFIG_SMP
 int sched_update_scaling(void)
 {
 	unsigned int factor = get_update_sysctl_factor();
@@ -956,6 +956,7 @@ int sched_update_scaling(void)
 	return 0;
 }
+#endif
 #endif
 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se);
@@ -6335,9 +6336,8 @@ static inline int throttled_lb_pair(struct task_group *tg,
 	return 0;
 }
-void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b, struct cfs_bandwidth *parent) {}
 #ifdef CONFIG_FAIR_GROUP_SCHED
+void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b, struct cfs_bandwidth *parent) {}
 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
 #endif
......
@@ -1250,6 +1250,7 @@ static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
 bool cfs_prio_less(const struct task_struct *a, const struct task_struct *b,
 		   bool fi);
+void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);
 /*
  * Helpers to check if the CPU's core cookie matches with the task's cookie
@@ -2421,6 +2422,7 @@ static inline struct cpuidle_state *idle_get_state(struct rq *rq)
 #endif
 extern void schedule_idle(void);
+asmlinkage void schedule_user(void);
 extern void sysrq_sched_debug_show(void);
 extern void sched_init_granularity(void);
......