Commit 317d359d authored by Peter Zijlstra, committed by Ingo Molnar

sched/core: Force proper alignment of 'struct util_est'

For some as yet not understood reason, Tony gets unaligned access
traps on IA64 because of:

  struct util_est ue = READ_ONCE(p->se.avg.util_est);

and:

  WRITE_ONCE(p->se.avg.util_est, ue);

introduced by commit:

  d519329f ("sched/fair: Update util_est only on util_avg updates")

Normally those two fields should end up at an 8-byte aligned location,
but UP builds and structure-layout randomization (RANDSTRUCT) can mess
that up, so enforce the alignment explicitly.
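
As a standalone illustration (plain userspace C; the struct and field
names below are made up, not the kernel's), the attribute used by this
patch pins the 8-byte, two-u32 struct to a 64-bit boundary, so a
compiler that turns a READ_ONCE()/WRITE_ONCE() of the whole struct into
a single 64-bit access can never fault on a strict-alignment machine
like IA64:

	#include <assert.h>
	#include <stdint.h>

	/* Illustrative stand-in for struct util_est: two u32s, 8 bytes total. */
	struct util_est_like {
		uint32_t	enqueued;
		uint32_t	ewma;
	} __attribute__((__aligned__(sizeof(uint64_t))));

	/*
	 * With the attribute the type's alignment equals its size, so an
	 * instance can never straddle a 64-bit boundary no matter where a
	 * UP layout or struct randomization places it in its parent.
	 */
	static_assert(_Alignof(struct util_est_like) == sizeof(uint64_t),
		      "util_est-like struct must be naturally aligned");

	int main(void) { return 0; }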

Also make the alignment on sched_avg unconditional, as it is really
about data locality, not false-sharing.
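
For context, a rough sketch of the two annotations (paraphrasing
include/linux/cache.h; the exact definitions depend on the architecture
and config) shows why the unconditional form is wanted here:

	/* Always aligns the object to the cache-line size. */
	#define ____cacheline_aligned	__attribute__((__aligned__(SMP_CACHE_BYTES)))

	/*
	 * Expands to nothing on !CONFIG_SMP, so the old per-member
	 * annotation provided no alignment at all on UP builds.
	 */
	#ifdef CONFIG_SMP
	#define ____cacheline_aligned_in_smp	____cacheline_aligned
	#else
	#define ____cacheline_aligned_in_smp
	#endif

Moving the annotation onto struct sched_avg itself keeps the
data-locality benefit on every build, while the u64 alignment on
struct util_est covers the correctness requirement.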

With or without this patch the layout for sched_avg on an
ia64-defconfig build looks like:

	$ pahole -EC sched_avg ia64-defconfig/kernel/sched/core.o
	die__process_function: tag not supported (INVALID)!
	struct sched_avg {
		/* typedef u64 */ long long unsigned int     last_update_time;                   /*     0     8 */
		/* typedef u64 */ long long unsigned int     load_sum;                           /*     8     8 */
		/* typedef u64 */ long long unsigned int     runnable_load_sum;                  /*    16     8 */
		/* typedef u32 */ unsigned int               util_sum;                           /*    24     4 */
		/* typedef u32 */ unsigned int               period_contrib;                     /*    28     4 */
		long unsigned int          load_avg;                                             /*    32     8 */
		long unsigned int          runnable_load_avg;                                    /*    40     8 */
		long unsigned int          util_avg;                                             /*    48     8 */
		struct util_est {
			unsigned int       enqueued;                                             /*    56     4 */
			unsigned int       ewma;                                                 /*    60     4 */
		} util_est; /*    56     8 */
		/* --- cacheline 1 boundary (64 bytes) --- */

		/* size: 64, cachelines: 1, members: 9 */
	};
Reported-and-Tested-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frederic Weisbecker <frederic@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Norbert Manthey <nmanthey@amazon.de>
Cc: Patrick Bellasi <patrick.bellasi@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony <tony.luck@intel.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Fixes: d519329f ("sched/fair: Update util_est only on util_avg updates")
Link: http://lkml.kernel.org/r/20180405080521.GG4129@hirez.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent adcc8da8
@@ -300,7 +300,7 @@ struct util_est {
 	unsigned int			enqueued;
 	unsigned int			ewma;
 #define UTIL_EST_WEIGHT_SHIFT		2
-};
+} __attribute__((__aligned__(sizeof(u64))));
 
 /*
  * The load_avg/util_avg accumulates an infinite geometric series
@@ -364,7 +364,7 @@ struct sched_avg {
 	unsigned long			runnable_load_avg;
 	unsigned long			util_avg;
 	struct util_est			util_est;
-};
+} ____cacheline_aligned;
 
 struct sched_statistics {
 #ifdef CONFIG_SCHEDSTATS
@@ -435,7 +435,7 @@ struct sched_entity {
 	 * Put into separate cache line so it does not
 	 * collide with read-mostly values above.
 	 */
-	struct sched_avg		avg ____cacheline_aligned_in_smp;
+	struct sched_avg		avg;
 #endif
 };