Commit d562de48 authored by Alexander Graf, committed by Avi Kivity

KVM: PPC: Convert DSISR to shared page

The DSISR register contains information about a data page fault. It is fully
read/write from inside the guest context and we don't need to worry about
interacting based on writes of this register.

This patch converts all users of the current field to the shared page.
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 666e7252
...@@ -85,7 +85,6 @@ struct kvmppc_vcpu_book3s { ...@@ -85,7 +85,6 @@ struct kvmppc_vcpu_book3s {
u64 hid[6]; u64 hid[6];
u64 gqr[8]; u64 gqr[8];
int slb_nr; int slb_nr;
u32 dsisr;
u64 sdr1; u64 sdr1;
u64 hior; u64 hior;
u64 msr_mask; u64 msr_mask;
......
...@@ -24,6 +24,7 @@ ...@@ -24,6 +24,7 @@
struct kvm_vcpu_arch_shared { struct kvm_vcpu_arch_shared {
__u64 msr; __u64 msr;
__u32 dsisr;
}; };
#ifdef __KERNEL__ #ifdef __KERNEL__
......
...@@ -595,15 +595,16 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, ...@@ -595,15 +595,16 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
if (page_found == -ENOENT) { if (page_found == -ENOENT) {
/* Page not found in guest PTE entries */ /* Page not found in guest PTE entries */
vcpu->arch.dear = kvmppc_get_fault_dar(vcpu); vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr; vcpu->arch.shared->dsisr = to_svcpu(vcpu)->fault_dsisr;
vcpu->arch.shared->msr |= vcpu->arch.shared->msr |=
(to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL); (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
kvmppc_book3s_queue_irqprio(vcpu, vec); kvmppc_book3s_queue_irqprio(vcpu, vec);
} else if (page_found == -EPERM) { } else if (page_found == -EPERM) {
/* Storage protection */ /* Storage protection */
vcpu->arch.dear = kvmppc_get_fault_dar(vcpu); vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr & ~DSISR_NOHPTE; vcpu->arch.shared->dsisr =
to_book3s(vcpu)->dsisr |= DSISR_PROTFAULT; to_svcpu(vcpu)->fault_dsisr & ~DSISR_NOHPTE;
vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
vcpu->arch.shared->msr |= vcpu->arch.shared->msr |=
(to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL); (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
kvmppc_book3s_queue_irqprio(vcpu, vec); kvmppc_book3s_queue_irqprio(vcpu, vec);
...@@ -867,7 +868,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, ...@@ -867,7 +868,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr); r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
} else { } else {
vcpu->arch.dear = dar; vcpu->arch.dear = dar;
to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr; vcpu->arch.shared->dsisr = to_svcpu(vcpu)->fault_dsisr;
kvmppc_book3s_queue_irqprio(vcpu, exit_nr); kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
kvmppc_mmu_pte_flush(vcpu, vcpu->arch.dear, ~0xFFFUL); kvmppc_mmu_pte_flush(vcpu, vcpu->arch.dear, ~0xFFFUL);
r = RESUME_GUEST; r = RESUME_GUEST;
...@@ -994,7 +995,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, ...@@ -994,7 +995,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
} }
case BOOK3S_INTERRUPT_ALIGNMENT: case BOOK3S_INTERRUPT_ALIGNMENT:
if (kvmppc_read_inst(vcpu) == EMULATE_DONE) { if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
to_book3s(vcpu)->dsisr = kvmppc_alignment_dsisr(vcpu, vcpu->arch.shared->dsisr = kvmppc_alignment_dsisr(vcpu,
kvmppc_get_last_inst(vcpu)); kvmppc_get_last_inst(vcpu));
vcpu->arch.dear = kvmppc_alignment_dar(vcpu, vcpu->arch.dear = kvmppc_alignment_dar(vcpu,
kvmppc_get_last_inst(vcpu)); kvmppc_get_last_inst(vcpu));
......
...@@ -221,7 +221,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, ...@@ -221,7 +221,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
else if (r == -EPERM) else if (r == -EPERM)
dsisr |= DSISR_PROTFAULT; dsisr |= DSISR_PROTFAULT;
to_book3s(vcpu)->dsisr = dsisr; vcpu->arch.shared->dsisr = dsisr;
to_svcpu(vcpu)->fault_dsisr = dsisr; to_svcpu(vcpu)->fault_dsisr = dsisr;
kvmppc_book3s_queue_irqprio(vcpu, kvmppc_book3s_queue_irqprio(vcpu,
...@@ -327,7 +327,7 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) ...@@ -327,7 +327,7 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
to_book3s(vcpu)->sdr1 = spr_val; to_book3s(vcpu)->sdr1 = spr_val;
break; break;
case SPRN_DSISR: case SPRN_DSISR:
to_book3s(vcpu)->dsisr = spr_val; vcpu->arch.shared->dsisr = spr_val;
break; break;
case SPRN_DAR: case SPRN_DAR:
vcpu->arch.dear = spr_val; vcpu->arch.dear = spr_val;
...@@ -440,7 +440,7 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) ...@@ -440,7 +440,7 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->sdr1); kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->sdr1);
break; break;
case SPRN_DSISR: case SPRN_DSISR:
kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->dsisr); kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dsisr);
break; break;
case SPRN_DAR: case SPRN_DAR:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.dear); kvmppc_set_gpr(vcpu, rt, vcpu->arch.dear);
......
...@@ -173,7 +173,7 @@ static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store) ...@@ -173,7 +173,7 @@ static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store)
/* Page Fault */ /* Page Fault */
dsisr = kvmppc_set_field(0, 33, 33, 1); dsisr = kvmppc_set_field(0, 33, 33, 1);
if (is_store) if (is_store)
to_book3s(vcpu)->dsisr = kvmppc_set_field(dsisr, 38, 38, 1); shared->dsisr = kvmppc_set_field(dsisr, 38, 38, 1);
kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE); kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment