Commit 731dade1 authored by Michael Ellerman

powerpc/kvm: Explicitly mark kvm guest code as __init

All the code in kvm.c can be marked __init. Most of it is already
inlined into the initcall, but not all. So instead of relying on the
inlining, mark it all as __init. This saves ~280 bytes of text for my
configuration.
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20190911115746.12433-3-mpe@ellerman.id.au
parent dac39f78
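
Background on the mechanism, for readers new to it: `__init` places a function in the kernel's .init.text section, which is freed once boot completes, so code reachable only from an initcall costs no resident text. A minimal sketch of the pattern follows; the function names are hypothetical and not from this patch:

	#include <linux/init.h>
	#include <linux/printk.h>

	/* Lands in .init.text; this memory is reclaimed after boot. */
	static void __init setup_helper(void)
	{
		pr_info("one-time setup\n");
	}

	/*
	 * Initcall functions are __init themselves, so helpers they
	 * call only at boot can safely carry the annotation too.
	 */
	static int __init example_init(void)
	{
		setup_helper();
		return 0;
	}
	arch_initcall(example_init);

If a non-__init function ever called setup_helper() after boot, the build's section-mismatch checks (modpost) would warn about the reference into .init.text.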
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -68,13 +68,13 @@ extern char kvm_tmp[];
 extern char kvm_tmp_end[];
 static int kvm_tmp_index;

-static inline void kvm_patch_ins(u32 *inst, u32 new_inst)
+static void __init kvm_patch_ins(u32 *inst, u32 new_inst)
 {
 	*inst = new_inst;
 	flush_icache_range((ulong)inst, (ulong)inst + 4);
 }

-static void kvm_patch_ins_ll(u32 *inst, long addr, u32 rt)
+static void __init kvm_patch_ins_ll(u32 *inst, long addr, u32 rt)
 {
 #ifdef CONFIG_64BIT
 	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
@@ -83,7 +83,7 @@ static void kvm_patch_ins_ll(u32 *inst, long addr, u32 rt)
 #endif
 }

-static void kvm_patch_ins_ld(u32 *inst, long addr, u32 rt)
+static void __init kvm_patch_ins_ld(u32 *inst, long addr, u32 rt)
 {
 #ifdef CONFIG_64BIT
 	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
@@ -92,12 +92,12 @@ static void kvm_patch_ins_ld(u32 *inst, long addr, u32 rt)
 #endif
 }

-static void kvm_patch_ins_lwz(u32 *inst, long addr, u32 rt)
+static void __init kvm_patch_ins_lwz(u32 *inst, long addr, u32 rt)
 {
 	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000ffff));
 }

-static void kvm_patch_ins_std(u32 *inst, long addr, u32 rt)
+static void __init kvm_patch_ins_std(u32 *inst, long addr, u32 rt)
 {
 #ifdef CONFIG_64BIT
 	kvm_patch_ins(inst, KVM_INST_STD | rt | (addr & 0x0000fffc));
@@ -106,17 +106,17 @@ static void kvm_patch_ins_std(u32 *inst, long addr, u32 rt)
 #endif
 }

-static void kvm_patch_ins_stw(u32 *inst, long addr, u32 rt)
+static void __init kvm_patch_ins_stw(u32 *inst, long addr, u32 rt)
 {
 	kvm_patch_ins(inst, KVM_INST_STW | rt | (addr & 0x0000fffc));
 }

-static void kvm_patch_ins_nop(u32 *inst)
+static void __init kvm_patch_ins_nop(u32 *inst)
 {
 	kvm_patch_ins(inst, KVM_INST_NOP);
 }

-static void kvm_patch_ins_b(u32 *inst, int addr)
+static void __init kvm_patch_ins_b(u32 *inst, int addr)
 {
 #if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC_BOOK3S)
 	/* On relocatable kernels interrupts handlers and our code
@@ -129,7 +129,7 @@ static void kvm_patch_ins_b(u32 *inst, int addr)
 	kvm_patch_ins(inst, KVM_INST_B | (addr & KVM_INST_B_MASK));
 }

-static u32 *kvm_alloc(int len)
+static u32 * __init kvm_alloc(int len)
 {
 	u32 *p;
@@ -152,7 +152,7 @@ extern u32 kvm_emulate_mtmsrd_orig_ins_offs;
 extern u32 kvm_emulate_mtmsrd_len;
 extern u32 kvm_emulate_mtmsrd[];

-static void kvm_patch_ins_mtmsrd(u32 *inst, u32 rt)
+static void __init kvm_patch_ins_mtmsrd(u32 *inst, u32 rt)
 {
 	u32 *p;
 	int distance_start;
@@ -205,7 +205,7 @@ extern u32 kvm_emulate_mtmsr_orig_ins_offs;
 extern u32 kvm_emulate_mtmsr_len;
 extern u32 kvm_emulate_mtmsr[];

-static void kvm_patch_ins_mtmsr(u32 *inst, u32 rt)
+static void __init kvm_patch_ins_mtmsr(u32 *inst, u32 rt)
 {
 	u32 *p;
 	int distance_start;
@@ -266,7 +266,7 @@ extern u32 kvm_emulate_wrtee_orig_ins_offs;
 extern u32 kvm_emulate_wrtee_len;
 extern u32 kvm_emulate_wrtee[];

-static void kvm_patch_ins_wrtee(u32 *inst, u32 rt, int imm_one)
+static void __init kvm_patch_ins_wrtee(u32 *inst, u32 rt, int imm_one)
 {
 	u32 *p;
 	int distance_start;
@@ -323,7 +323,7 @@ extern u32 kvm_emulate_wrteei_0_branch_offs;
 extern u32 kvm_emulate_wrteei_0_len;
 extern u32 kvm_emulate_wrteei_0[];

-static void kvm_patch_ins_wrteei_0(u32 *inst)
+static void __init kvm_patch_ins_wrteei_0(u32 *inst)
 {
 	u32 *p;
 	int distance_start;
@@ -364,7 +364,7 @@ extern u32 kvm_emulate_mtsrin_orig_ins_offs;
 extern u32 kvm_emulate_mtsrin_len;
 extern u32 kvm_emulate_mtsrin[];

-static void kvm_patch_ins_mtsrin(u32 *inst, u32 rt, u32 rb)
+static void __init kvm_patch_ins_mtsrin(u32 *inst, u32 rt, u32 rb)
 {
 	u32 *p;
 	int distance_start;
@@ -400,7 +400,7 @@ static void kvm_patch_ins_mtsrin(u32 *inst, u32 rt, u32 rb)
 #endif

-static void kvm_map_magic_page(void *data)
+static void __init kvm_map_magic_page(void *data)
 {
 	u32 *features = data;
@@ -415,7 +415,7 @@ static void kvm_map_magic_page(void *data)
 	*features = out[0];
 }

-static void kvm_check_ins(u32 *inst, u32 features)
+static void __init kvm_check_ins(u32 *inst, u32 features)
 {
 	u32 _inst = *inst;
 	u32 inst_no_rt = _inst & ~KVM_MASK_RT;
@@ -659,7 +659,7 @@ static void kvm_check_ins(u32 *inst, u32 features)
 extern u32 kvm_template_start[];
 extern u32 kvm_template_end[];

-static void kvm_use_magic_page(void)
+static void __init kvm_use_magic_page(void)
 {
 	u32 *p;
 	u32 *start, *end;
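
The ~280 byte figure quoted in the commit message is the kind of delta one can check by comparing two builds; the exact number depends on configuration and compiler. For example, with hypothetical vmlinux.before/vmlinux.after images of the same config built without and with this patch:

	$ size vmlinux.before vmlinux.after        # compare the "text" column
	$ scripts/bloat-o-meter vmlinux.before vmlinux.after   # per-symbol breakdown

Note that the savings show up as smaller resident text; the __init code still exists in the image, but in a section that is discarded at the end of boot.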