Commit dfcb245e authored by Ingo Molnar

sched: Fix various typos in comments

Go over the scheduler source code and fix common typos
in comments - and a typo in an actual variable name.

No change in functionality intended.

Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 5f675231
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -176,7 +176,7 @@ struct task_group;
  * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
  *
  * However, with slightly different timing the wakeup TASK_RUNNING store can
- * also collide with the TASK_UNINTERRUPTIBLE store. Loosing that store is not
+ * also collide with the TASK_UNINTERRUPTIBLE store. Losing that store is not
  * a problem either because that will result in one extra go around the loop
  * and our @cond test will save the day.
  *
@@ -515,7 +515,7 @@ struct sched_dl_entity {
 	/*
 	 * Actual scheduling parameters. Initialized with the values above,
-	 * they are continously updated during task execution. Note that
+	 * they are continuously updated during task execution. Note that
 	 * the remaining runtime could be < 0 in case we are in overrun.
 	 */
 	s64		runtime;	/* Remaining runtime for this instance */
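For context, the store collision in the first hunk arises in the canonical wait-loop pattern that the surrounding comment block in include/linux/sched.h documents; roughly:

    for (;;) {
        set_current_state(TASK_UNINTERRUPTIBLE);
        if (cond)               /* the @cond the comment refers to */
            break;
        schedule();
    }
    __set_current_state(TASK_RUNNING);

A concurrent wakeup can store TASK_RUNNING between set_current_state() and the @cond test; as the comment says, losing the TASK_UNINTERRUPTIBLE store only costs one extra pass through the loop.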
--- a/include/linux/sched/isolation.h
+++ b/include/linux/sched/isolation.h
@@ -16,7 +16,7 @@ enum hk_flags {
 };

 #ifdef CONFIG_CPU_ISOLATION
-DECLARE_STATIC_KEY_FALSE(housekeeping_overriden);
+DECLARE_STATIC_KEY_FALSE(housekeeping_overridden);
 extern int housekeeping_any_cpu(enum hk_flags flags);
 extern const struct cpumask *housekeeping_cpumask(enum hk_flags flags);
 extern void housekeeping_affine(struct task_struct *t, enum hk_flags flags);
@@ -43,7 +43,7 @@ static inline void housekeeping_init(void) { }
 static inline bool housekeeping_cpu(int cpu, enum hk_flags flags)
 {
 #ifdef CONFIG_CPU_ISOLATION
-	if (static_branch_unlikely(&housekeeping_overriden))
+	if (static_branch_unlikely(&housekeeping_overridden))
 		return housekeeping_test_cpu(cpu, flags);
 #endif
 	return true;
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -153,7 +153,7 @@ static inline gfp_t current_gfp_context(gfp_t flags)
 {
 	/*
 	 * NOIO implies both NOIO and NOFS and it is a weaker context
-	 * so always make sure it makes precendence
+	 * so always make sure it makes precedence
 	 */
 	if (unlikely(current->flags & PF_MEMALLOC_NOIO))
		flags &= ~(__GFP_IO | __GFP_FS);
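For context, current_gfp_context() is what gives scoped NOIO its effect. A minimal sketch of entering such a scope, assuming the memalloc_noio_save()/memalloc_noio_restore() helpers from this same header:

    unsigned int noio_flags = memalloc_noio_save(); /* sets PF_MEMALLOC_NOIO */

    /* Allocations in this scope get __GFP_IO/__GFP_FS masked off,
     * so GFP_KERNEL here effectively behaves like GFP_NOIO: */
    struct page *page = alloc_page(GFP_KERNEL);

    memalloc_noio_restore(noio_flags);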
--- a/include/linux/sched/stat.h
+++ b/include/linux/sched/stat.h
@@ -8,7 +8,7 @@
  * Various counters maintained by the scheduler and fork(),
  * exposed via /proc, sys.c or used by drivers via these APIs.
  *
- * ( Note that all these values are aquired without locking,
+ * ( Note that all these values are acquired without locking,
  *   so they can only be relied on in narrow circumstances. )
  */
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2857,7 +2857,7 @@ unsigned long nr_running(void)
  * preemption, thus the result might have a time-of-check-to-time-of-use
  * race. The caller is responsible to use it correctly, for example:
  *
- * - from a non-preemptable section (of course)
+ * - from a non-preemptible section (of course)
  *
  * - from a thread that is bound to a single CPU
  *
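The fixed comment belongs to the time-of-check-to-time-of-use discussion near nr_running(), most likely the one for single_task_running(). A hedged usage sketch from one of the valid contexts the comment lists (do_light_work() is a hypothetical helper):

    /* From a kthread bound to a single CPU, the snapshot stays
     * meaningful: this task cannot migrate between check and use. */
    if (single_task_running())
        do_light_work();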
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -525,7 +525,7 @@ void account_idle_ticks(unsigned long ticks)
 /*
  * Perform (stime * rtime) / total, but avoid multiplication overflow by
- * loosing precision when the numbers are big.
+ * losing precision when the numbers are big.
  */
 static u64 scale_stime(u64 stime, u64 rtime, u64 total)
 {
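A minimal sketch of the precision-for-range trade the comment describes (not the actual scale_stime() implementation, which balances bits between the operands more carefully): every bit dropped from stime or rtime is matched by a bit dropped from total, so the quotient is roughly preserved while the 64-bit multiply can no longer overflow:

    static u64 scale_stime_sketch(u64 stime, u64 rtime, u64 total)
    {
        while (stime > U32_MAX) {       /* shrink stime and total together */
            stime >>= 1;
            total >>= 1;
        }
        while (rtime > U32_MAX) {       /* shrink rtime and total together */
            rtime >>= 1;
            total >>= 1;
        }
        if (!total)
            total = 1;                  /* guard the division after shifting */
        /* Both operands now fit in 32 bits: the product fits in 64. */
        return div64_u64(stime * rtime, total);
    }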
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -727,7 +727,7 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se,
  * refill the runtime and set the deadline a period in the future,
  * because keeping the current (absolute) deadline of the task would
  * result in breaking guarantees promised to other tasks (refer to
- * Documentation/scheduler/sched-deadline.txt for more informations).
+ * Documentation/scheduler/sched-deadline.txt for more information).
  *
  * This function returns true if:
  *
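In code terms, the refill the comment describes boils down to roughly this (a heavy simplification of what replenish_dl_entity() actually does; field names per struct sched_dl_entity):

    /* Start a fresh instance: full budget, deadline one period away. */
    dl_se->deadline = rq_clock(rq) + dl_se->dl_period;
    dl_se->runtime  = dl_se->dl_runtime;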
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -703,9 +703,9 @@ void init_entity_runnable_average(struct sched_entity *se)
 	memset(sa, 0, sizeof(*sa));

 	/*
-	 * Tasks are intialized with full load to be seen as heavy tasks until
+	 * Tasks are initialized with full load to be seen as heavy tasks until
 	 * they get a chance to stabilize to their real load level.
-	 * Group entities are intialized with zero load to reflect the fact that
+	 * Group entities are initialized with zero load to reflect the fact that
 	 * nothing has been attached to the task group yet.
 	 */
 	if (entity_is_task(se))
@@ -3976,8 +3976,8 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	/*
 	 * When dequeuing a sched_entity, we must:
 	 * - Update loads to have both entity and cfs_rq synced with now.
-	 * - Substract its load from the cfs_rq->runnable_avg.
-	 * - Substract its previous weight from cfs_rq->load.weight.
+	 * - Subtract its load from the cfs_rq->runnable_avg.
+	 * - Subtract its previous weight from cfs_rq->load.weight.
 	 * - For group entity, update its weight to reflect the new share
 	 *   of its group cfs_rq.
 	 */
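The task/group asymmetry described at init_entity_runnable_average() corresponds to code along these lines (condensed from the function body as of this commit):

    if (entity_is_task(se))
        sa->load_avg = scale_load_down(se->load.weight);
    /* Group entities keep the zero load from the memset() above. */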
--- a/kernel/sched/isolation.c
+++ b/kernel/sched/isolation.c
@@ -8,14 +8,14 @@
  */
 #include "sched.h"

-DEFINE_STATIC_KEY_FALSE(housekeeping_overriden);
-EXPORT_SYMBOL_GPL(housekeeping_overriden);
+DEFINE_STATIC_KEY_FALSE(housekeeping_overridden);
+EXPORT_SYMBOL_GPL(housekeeping_overridden);
 static cpumask_var_t housekeeping_mask;
 static unsigned int housekeeping_flags;

 int housekeeping_any_cpu(enum hk_flags flags)
 {
-	if (static_branch_unlikely(&housekeeping_overriden))
+	if (static_branch_unlikely(&housekeeping_overridden))
 		if (housekeeping_flags & flags)
 			return cpumask_any_and(housekeeping_mask, cpu_online_mask);
 	return smp_processor_id();
@@ -24,7 +24,7 @@ EXPORT_SYMBOL_GPL(housekeeping_any_cpu);

 const struct cpumask *housekeeping_cpumask(enum hk_flags flags)
 {
-	if (static_branch_unlikely(&housekeeping_overriden))
+	if (static_branch_unlikely(&housekeeping_overridden))
 		if (housekeeping_flags & flags)
 			return housekeeping_mask;
 	return cpu_possible_mask;
@@ -33,7 +33,7 @@ EXPORT_SYMBOL_GPL(housekeeping_cpumask);

 void housekeeping_affine(struct task_struct *t, enum hk_flags flags)
 {
-	if (static_branch_unlikely(&housekeeping_overriden))
+	if (static_branch_unlikely(&housekeeping_overridden))
 		if (housekeeping_flags & flags)
 			set_cpus_allowed_ptr(t, housekeeping_mask);
 }
@@ -41,7 +41,7 @@ EXPORT_SYMBOL_GPL(housekeeping_affine);

 bool housekeeping_test_cpu(int cpu, enum hk_flags flags)
 {
-	if (static_branch_unlikely(&housekeeping_overriden))
+	if (static_branch_unlikely(&housekeeping_overridden))
 		if (housekeeping_flags & flags)
 			return cpumask_test_cpu(cpu, housekeeping_mask);
 	return true;
@@ -53,7 +53,7 @@ void __init housekeeping_init(void)
 	if (!housekeeping_flags)
 		return;

-	static_branch_enable(&housekeeping_overriden);
+	static_branch_enable(&housekeeping_overridden);

 	if (housekeeping_flags & HK_FLAG_TICK)
 		sched_tick_offload_init();
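housekeeping_overridden (the variable whose spelling this commit fixes) follows the standard static-key pattern: the key stays false unless an isolation boot parameter is given, so the common case costs a patched no-op branch. A self-contained sketch of the pattern with a hypothetical key and helper:

    #include <linux/jump_label.h>

    DEFINE_STATIC_KEY_FALSE(my_feature_key);    /* hypothetical key, default off */

    static void my_slow_path(void) { /* hypothetical rare-case work */ }

    void my_hot_path(void)
    {
        /* Compiles to a no-op branch while the key is off... */
        if (static_branch_unlikely(&my_feature_key))
            my_slow_path();
    }

    void my_feature_setup(void)
    {
        static_branch_enable(&my_feature_key);  /* ...until enabled, e.g. at boot */
    }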
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -637,7 +637,7 @@ struct dl_rq {
 	/*
 	 * Deadline values of the currently executing and the
 	 * earliest ready task on this rq. Caching these facilitates
-	 * the decision wether or not a ready but not running task
+	 * the decision whether or not a ready but not running task
 	 * should migrate somewhere else.
 	 */
 	struct {
@@ -1434,7 +1434,7 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
 #ifdef CONFIG_SMP
 	/*
 	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
-	 * successfuly executed on another CPU. We must ensure that updates of
+	 * successfully executed on another CPU. We must ensure that updates of
 	 * per-task data have been completed by this moment.
 	 */
 	smp_wmb();
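The ordering requirement in the last hunk is the usual publish pattern; a hedged, generic sketch (field names hypothetical, not the actual __set_task_cpu() body):

    p->per_task_state = new_value;  /* updates that must be visible first */
    smp_wmb();                      /* order the stores above... */
    WRITE_ONCE(p->cpu, new_cpu);    /* ...before publishing the new ->cpu */

A reader that observes the new ->cpu value (e.g. via task_rq_lock()) is then guaranteed to also observe the earlier per-task updates, given a matching read barrier or acquire on its side.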