Commit e43f1331 authored by James Morse, committed by Marc Zyngier

arm64: Ask the compiler to __always_inline functions used by KVM at HYP

KVM uses some of the static-inline helpers like icache_is_vipt() from
its HYP code. This assumes the function is inlined so that the code is
mapped to EL2. The compiler may decide not to inline these, and the
out-of-line version may not be in the __hyp_text section.

Add the additional __always_ hint to these static-inlines that are used
by KVM.
Signed-off-by: James Morse <james.morse@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Acked-by: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20200220165839.256881-4-james.morse@arm.com
parent 8c2d146e
@@ -69,7 +69,7 @@ static inline int icache_is_aliasing(void)
 	return test_bit(ICACHEF_ALIASING, &__icache_flags);
 }
-static inline int icache_is_vpipt(void)
+static __always_inline int icache_is_vpipt(void)
 {
 	return test_bit(ICACHEF_VPIPT, &__icache_flags);
 }
...
@@ -145,7 +145,7 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 extern void flush_dcache_page(struct page *);
-static inline void __flush_icache_all(void)
+static __always_inline void __flush_icache_all(void)
 {
 	if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
 		return;
...
@@ -435,13 +435,13 @@ cpuid_feature_extract_signed_field(u64 features, int field)
 	return cpuid_feature_extract_signed_field_width(features, field, 4);
 }
-static inline unsigned int __attribute_const__
+static __always_inline unsigned int __attribute_const__
 cpuid_feature_extract_unsigned_field_width(u64 features, int field, int width)
 {
 	return (u64)(features << (64 - width - field)) >> (64 - width);
 }
-static inline unsigned int __attribute_const__
+static __always_inline unsigned int __attribute_const__
 cpuid_feature_extract_unsigned_field(u64 features, int field)
 {
 	return cpuid_feature_extract_unsigned_field_width(features, field, 4);
@@ -564,7 +564,7 @@ static inline bool system_supports_mixed_endian(void)
 	return val == 0x1;
 }
-static inline bool system_supports_fpsimd(void)
+static __always_inline bool system_supports_fpsimd(void)
 {
 	return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD);
 }
@@ -575,7 +575,7 @@ static inline bool system_uses_ttbr0_pan(void)
 		!cpus_have_const_cap(ARM64_HAS_PAN);
 }
-static inline bool system_supports_sve(void)
+static __always_inline bool system_supports_sve(void)
 {
 	return IS_ENABLED(CONFIG_ARM64_SVE) &&
 		cpus_have_const_cap(ARM64_SVE);
...
@@ -34,7 +34,7 @@ static inline void __raw_writew(u16 val, volatile void __iomem *addr)
 }
 #define __raw_writel __raw_writel
-static inline void __raw_writel(u32 val, volatile void __iomem *addr)
+static __always_inline void __raw_writel(u32 val, volatile void __iomem *addr)
 {
 	asm volatile("str %w0, [%1]" : : "rZ" (val), "r" (addr));
 }
@@ -69,7 +69,7 @@ static inline u16 __raw_readw(const volatile void __iomem *addr)
 }
 #define __raw_readl __raw_readl
-static inline u32 __raw_readl(const volatile void __iomem *addr)
+static __always_inline u32 __raw_readl(const volatile void __iomem *addr)
 {
 	u32 val;
 	asm volatile(ALTERNATIVE("ldr %w0, [%1]",
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment