Commit f2569053 authored by Avi Kivity

Merge branch 'for-upstream' of git://github.com/agraf/linux-2.6 into next

PPC updates from Alex.

* 'for-upstream' of git://github.com/agraf/linux-2.6:
  KVM: PPC: Emulator: clean up SPR reads and writes
  KVM: PPC: Emulator: clean up instruction parsing
  kvm/powerpc: Add new ioctl to retrieve server MMU infos
  kvm/book3s: Make kernel emulated H_PUT_TCE available for "PR" KVM
  KVM: PPC: bookehv: Fix r8/r13 storing in level exception handler
  KVM: PPC: Book3S: Enable IRQs during exit handling
  KVM: PPC: Fix PR KVM on POWER7 bare metal
  KVM: PPC: Fix stbux emulation
  KVM: PPC: bookehv: Use lwz/stw instead of PPC_LL/PPC_STL for 32-bit fields
  KVM: PPC: Book3S: PR: No isync in slbie path
  KVM: PPC: Book3S: PR: Optimize entry path
  KVM: PPC: booke(hv): Fix save/restore of guest accessible SPRGs.
  KVM: PPC: Restrict PPC_[L|ST]D macro to asm code
  KVM: PPC: bookehv: Use a Macro for saving/restoring guest registers to/from their 64 bit copies.
  KVM: PPC: Use clockevent multiplier and shifter for decrementer
  KVM: Use minimum and maximum address mapped by TLB1
Signed-off-by: Avi Kivity <avi@redhat.com>
parents 9f4260e7 54771e62
@@ -1860,6 +1860,76 @@ See KVM_GET_PIT2 for details on struct kvm_pit_state2.
 
 This IOCTL replaces the obsolete KVM_SET_PIT.
+
+4.74 KVM_PPC_GET_SMMU_INFO
+
+Capability: KVM_CAP_PPC_GET_SMMU_INFO
+Architectures: powerpc
+Type: vm ioctl
+Parameters: None
+Returns: 0 on success, -1 on error
+
+This populates and returns a structure describing the features of
+the "Server" class MMU emulation supported by KVM.
+This can in turn be used by userspace to generate the appropriate
+device-tree properties for the guest operating system.
+
+The structure contains some global information, followed by an
+array of supported segment page sizes:
+
+      struct kvm_ppc_smmu_info {
+             __u64 flags;
+             __u32 slb_size;
+             __u32 pad;
+             struct kvm_ppc_one_seg_page_size sps[KVM_PPC_PAGE_SIZES_MAX_SZ];
+      };
+
+The supported flags are:
+
+    - KVM_PPC_PAGE_SIZES_REAL:
+        When that flag is set, guest page sizes must "fit" the backing
+        store page sizes. When not set, any page size in the list can
+        be used regardless of how they are backed by userspace.
+
+    - KVM_PPC_1T_SEGMENTS
+        The emulated MMU supports 1T segments in addition to the
+        standard 256M ones.
+
+The "slb_size" field indicates how many SLB entries are supported.
+
+The "sps" array contains 8 entries indicating the supported base
+page sizes for a segment in increasing order. Each entry is defined
+as follows:
+
+   struct kvm_ppc_one_seg_page_size {
+        __u32 page_shift;       /* Base page shift of segment (or 0) */
+        __u32 slb_enc;          /* SLB encoding for BookS */
+        struct kvm_ppc_one_page_size enc[KVM_PPC_PAGE_SIZES_MAX_SZ];
+   };
+
+An entry with a "page_shift" of 0 is unused. Because the array is
+organized in increasing order, a lookup can stop when encountering
+such an entry.
+
+The "slb_enc" field provides the encoding to use in the SLB for the
+page size. The bits are in positions such that the value can directly
+be OR'ed into the "vsid" argument of the slbmte instruction.
+
+The "enc" array is a list which, for each of those segment base page
+sizes, provides the list of supported actual page sizes (which can
+only be larger than or equal to the base page size), along with the
+corresponding encoding in the hash PTE. Similarly, the array is
+8 entries sorted by increasing sizes, and an entry with a "0" shift
+is an empty entry and a terminator:
+
+   struct kvm_ppc_one_page_size {
+        __u32 page_shift;       /* Page shift (or 0) */
+        __u32 pte_enc;          /* Encoding in the HPTE (>>12) */
+   };
+
+The "pte_enc" field provides a value that can be OR'ed into the hash
+PTE's RPN field (i.e., it needs to be shifted left by 12 to OR it
+into the hash PTE second double word).
 
 5. The kvm_run structure
 ------------------------
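
For illustration only (not part of the merge): a minimal userspace sketch of
consuming this ioctl, assuming a VM file descriptor already obtained through
KVM_CREATE_VM. The helper name dump_smmu_info is made up for the example;
everything else comes from the documentation above.

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical helper: vm_fd is an existing KVM VM file descriptor. */
static void dump_smmu_info(int vm_fd)
{
        struct kvm_ppc_smmu_info info;
        int i, j;

        if (ioctl(vm_fd, KVM_PPC_GET_SMMU_INFO, &info) < 0) {
                perror("KVM_PPC_GET_SMMU_INFO");
                return;
        }

        printf("slb_size %u, 1T segments: %s\n", info.slb_size,
               (info.flags & KVM_PPC_1T_SEGMENTS) ? "yes" : "no");

        /* Both arrays are sorted by increasing size and terminated by
         * an entry with page_shift == 0. */
        for (i = 0; i < KVM_PPC_PAGE_SIZES_MAX_SZ; i++) {
                if (!info.sps[i].page_shift)
                        break;
                printf("base shift %u (slb_enc 0x%x):",
                       info.sps[i].page_shift, info.sps[i].slb_enc);
                for (j = 0; j < KVM_PPC_PAGE_SIZES_MAX_SZ; j++) {
                        if (!info.sps[i].enc[j].page_shift)
                                break;
                        /* pte_enc gets OR'ed into the HPTE RPN after a
                         * left shift by 12 (second doubleword). */
                        printf(" %u/0x%x", info.sps[i].enc[j].page_shift,
                               info.sps[i].enc[j].pte_enc);
                }
                printf("\n");
        }
}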
......
@@ -20,6 +20,16 @@
 #ifndef __POWERPC_KVM_ASM_H__
 #define __POWERPC_KVM_ASM_H__
 
+#ifdef __ASSEMBLY__
+#ifdef CONFIG_64BIT
+#define PPC_STD(sreg, offset, areg)  std sreg, (offset)(areg)
+#define PPC_LD(treg, offset, areg)   ld treg, (offset)(areg)
+#else
+#define PPC_STD(sreg, offset, areg)  stw sreg, (offset+4)(areg)
+#define PPC_LD(treg, offset, areg)   lwz treg, (offset+4)(areg)
+#endif
+#endif
+
 /* IVPR must be 64KiB-aligned. */
 #define VCPU_SIZE_ORDER 4
 #define VCPU_SIZE_LOG   (VCPU_SIZE_ORDER + 12)
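
The (offset+4) in the 32-bit variants is worth spelling out: the fields these
macros touch are 64-bit in the shared structures, and on big-endian PPC the
low word of a 64-bit value sits 4 bytes in, which is all a 32-bit lwz/stw can
usefully access. A small, self-contained C sketch of that layout (illustrative
only, not part of the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* A 64-bit field as it sits in memory on big-endian PPC:
         * value 0x1122334455667788 stored at "offset" 0. */
        unsigned char mem[8] = { 0x11, 0x22, 0x33, 0x44,
                                 0x55, 0x66, 0x77, 0x88 };

        /* 32-bit code that wants the low 32 bits must read at offset+4,
         * which is exactly what the stw/lwz variants of PPC_STD/PPC_LD
         * do above. */
        uint32_t low = ((uint32_t)mem[4] << 24) | (mem[5] << 16) |
                       (mem[6] << 8) | mem[7];

        printf("low word = 0x%08x\n", low);  /* prints 0x55667788 */
        return 0;
}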
......
@@ -237,7 +237,6 @@ struct kvm_arch {
        unsigned long vrma_slb_v;
        int rma_setup_done;
        int using_mmu_notifiers;
-       struct list_head spapr_tce_tables;
        spinlock_t slot_phys_lock;
        unsigned long *slot_phys[KVM_MEM_SLOTS_NUM];
        int slot_npages[KVM_MEM_SLOTS_NUM];
@@ -245,6 +244,9 @@ struct kvm_arch {
        struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
        struct kvmppc_linear_info *hpt_li;
 #endif /* CONFIG_KVM_BOOK3S_64_HV */
+#ifdef CONFIG_PPC_BOOK3S_64
+       struct list_head spapr_tce_tables;
+#endif
 };
 
 /*
......
@@ -107,8 +107,10 @@ extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
 extern int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                   unsigned int op, int *advance);
-extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs);
-extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt);
+extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn,
+                                     ulong val);
+extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn,
+                                     ulong *val);
 
 extern int kvmppc_booke_init(void);
 extern void kvmppc_booke_exit(void);
@@ -126,6 +128,8 @@ extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
 extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
 extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
                                struct kvm_create_spapr_tce *args);
+extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
+                            unsigned long ioba, unsigned long tce);
 extern long kvm_vm_ioctl_allocate_rma(struct kvm *kvm,
                                struct kvm_allocate_rma *rma);
 extern struct kvmppc_linear_info *kvm_alloc_rma(void);
@@ -138,6 +142,8 @@ extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem);
 extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem);
+extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
+                                     struct kvm_ppc_smmu_info *info);
 extern int kvmppc_bookehv_init(void);
 extern void kvmppc_bookehv_exit(void);
......
@@ -23,6 +23,7 @@
 extern unsigned long tb_ticks_per_jiffy;
 extern unsigned long tb_ticks_per_usec;
 extern unsigned long tb_ticks_per_sec;
+extern struct clock_event_device decrementer_clockevent;
 
 struct rtc_time;
 extern void to_tm(int tim, struct rtc_time * tm);
......
@@ -190,3 +190,7 @@ EXPORT_SYMBOL(__arch_hweight16);
 EXPORT_SYMBOL(__arch_hweight32);
 EXPORT_SYMBOL(__arch_hweight64);
 #endif
+
+#ifdef CONFIG_PPC_BOOK3S_64
+EXPORT_SYMBOL_GPL(mmu_psize_defs);
+#endif
@@ -100,7 +100,7 @@ static int decrementer_set_next_event(unsigned long evt,
 static void decrementer_set_mode(enum clock_event_mode mode,
                                 struct clock_event_device *dev);
 
-static struct clock_event_device decrementer_clockevent = {
+struct clock_event_device decrementer_clockevent = {
        .name           = "decrementer",
        .rating         = 200,
        .irq            = 0,
@@ -108,6 +108,7 @@ static struct clock_event_device decrementer_clockevent = {
        .set_mode       = decrementer_set_mode,
        .features       = CLOCK_EVT_FEAT_ONESHOT,
 };
+EXPORT_SYMBOL(decrementer_clockevent);
 
 DEFINE_PER_CPU(u64, decrementers_next_tb);
 static DEFINE_PER_CPU(struct clock_event_device, decrementers);
......
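
The export above exists so KVM can reuse the decrementer's already-calibrated
mult/shift pair ("Use clockevent multiplier and shifter for decrementer" in
the shortlog) instead of redoing timebase math. The conversion the clockevents
core performs is a plain fixed-point multiply; a stand-alone sketch of it
(illustrative, with mult/shift taken from decrementer_clockevent in the real
code):

#include <stdint.h>

/* clockevents-style delta conversion: mult/shift encode ticks per
 * nanosecond as a fixed-point fraction, so ticks = (ns * mult) >> shift. */
static uint64_t ns_to_dec_ticks(uint64_t ns, uint32_t mult, uint32_t shift)
{
        return (ns * mult) >> shift;
}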
@@ -37,22 +37,19 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                            unsigned int inst, int *advance)
 {
        int emulated = EMULATE_DONE;
-       int dcrn;
-       int ra;
-       int rb;
-       int rc;
-       int rs;
-       int rt;
-       int ws;
+       int dcrn = get_dcrn(inst);
+       int ra = get_ra(inst);
+       int rb = get_rb(inst);
+       int rc = get_rc(inst);
+       int rs = get_rs(inst);
+       int rt = get_rt(inst);
+       int ws = get_ws(inst);
 
        switch (get_op(inst)) {
        case 31:
                switch (get_xop(inst)) {
 
                case XOP_MFDCR:
-                       dcrn = get_dcrn(inst);
-                       rt = get_rt(inst);
-
                        /* The guest may access CPR0 registers to determine the timebase
                         * frequency, and it must know the real host frequency because it
                         * can directly access the timebase registers.
@@ -88,9 +85,6 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        break;
 
                case XOP_MTDCR:
-                       dcrn = get_dcrn(inst);
-                       rs = get_rs(inst);
-
                        /* emulate some access in kernel */
                        switch (dcrn) {
                        case DCRN_CPR0_CONFIG_ADDR:
@@ -108,17 +102,10 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        break;
 
                case XOP_TLBWE:
-                       ra = get_ra(inst);
-                       rs = get_rs(inst);
-                       ws = get_ws(inst);
                        emulated = kvmppc_44x_emul_tlbwe(vcpu, ra, rs, ws);
                        break;
 
                case XOP_TLBSX:
-                       rt = get_rt(inst);
-                       ra = get_ra(inst);
-                       rb = get_rb(inst);
-                       rc = get_rc(inst);
                        emulated = kvmppc_44x_emul_tlbsx(vcpu, rt, ra, rb, rc);
                        break;
 
@@ -141,41 +128,41 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
        return emulated;
 }
 
-int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
+int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
 {
        int emulated = EMULATE_DONE;
 
        switch (sprn) {
        case SPRN_PID:
-               kvmppc_set_pid(vcpu, kvmppc_get_gpr(vcpu, rs)); break;
+               kvmppc_set_pid(vcpu, spr_val); break;
        case SPRN_MMUCR:
-               vcpu->arch.mmucr = kvmppc_get_gpr(vcpu, rs); break;
+               vcpu->arch.mmucr = spr_val; break;
        case SPRN_CCR0:
-               vcpu->arch.ccr0 = kvmppc_get_gpr(vcpu, rs); break;
+               vcpu->arch.ccr0 = spr_val; break;
        case SPRN_CCR1:
-               vcpu->arch.ccr1 = kvmppc_get_gpr(vcpu, rs); break;
+               vcpu->arch.ccr1 = spr_val; break;
        default:
-               emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, rs);
+               emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, spr_val);
        }
 
        return emulated;
 }
 
-int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
+int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
 {
        int emulated = EMULATE_DONE;
 
        switch (sprn) {
        case SPRN_PID:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.pid); break;
+               *spr_val = vcpu->arch.pid; break;
        case SPRN_MMUCR:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.mmucr); break;
+               *spr_val = vcpu->arch.mmucr; break;
        case SPRN_CCR0:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.ccr0); break;
+               *spr_val = vcpu->arch.ccr0; break;
        case SPRN_CCR1:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.ccr1); break;
+               *spr_val = vcpu->arch.ccr1; break;
        default:
-               emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt);
+               emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, spr_val);
        }
 
        return emulated;
......
@@ -54,6 +54,7 @@ kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_PR) := \
        book3s_paired_singles.o \
        book3s_pr.o \
        book3s_pr_papr.o \
+       book3s_64_vio_hv.o \
        book3s_emulate.o \
        book3s_interrupts.o \
        book3s_mmu_hpte.o \
@@ -78,6 +79,7 @@ kvm-book3s_64-module-objs := \
        powerpc.o \
        emulate.o \
        book3s.o \
+       book3s_64_vio.o \
        $(kvm-book3s_64-objs-y)
 
 kvm-objs-$(CONFIG_KVM_BOOK3S_64) := $(kvm-book3s_64-module-objs)
......
@@ -90,8 +90,6 @@ slb_exit_skip_ ## num:
        or      r10, r10, r12
        slbie   r10
 
-       isync
-
        /* Fill SLB with our shadow */
        lbz     r12, SVCPU_SLB_MAX(r3)
......
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/highmem.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+#include <linux/hugetlb.h>
+#include <linux/list.h>
+#include <linux/anon_inodes.h>
+
+#include <asm/tlbflush.h>
+#include <asm/kvm_ppc.h>
+#include <asm/kvm_book3s.h>
+#include <asm/mmu-hash64.h>
+#include <asm/hvcall.h>
+#include <asm/synch.h>
+#include <asm/ppc-opcode.h>
+#include <asm/kvm_host.h>
+#include <asm/udbg.h>
+
+#define TCES_PER_PAGE  (PAGE_SIZE / sizeof(u64))
+
+static long kvmppc_stt_npages(unsigned long window_size)
+{
+       return ALIGN((window_size >> SPAPR_TCE_SHIFT)
+                    * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
+}
+
+static void release_spapr_tce_table(struct kvmppc_spapr_tce_table *stt)
+{
+       struct kvm *kvm = stt->kvm;
+       int i;
+
+       mutex_lock(&kvm->lock);
+       list_del(&stt->list);
+       for (i = 0; i < kvmppc_stt_npages(stt->window_size); i++)
+               __free_page(stt->pages[i]);
+       kfree(stt);
+       mutex_unlock(&kvm->lock);
+
+       kvm_put_kvm(kvm);
+}
+
+static int kvm_spapr_tce_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       struct kvmppc_spapr_tce_table *stt = vma->vm_file->private_data;
+       struct page *page;
+
+       if (vmf->pgoff >= kvmppc_stt_npages(stt->window_size))
+               return VM_FAULT_SIGBUS;
+
+       page = stt->pages[vmf->pgoff];
+       get_page(page);
+       vmf->page = page;
+       return 0;
+}
+
+static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
+       .fault = kvm_spapr_tce_fault,
+};
+
+static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
+{
+       vma->vm_ops = &kvm_spapr_tce_vm_ops;
+       return 0;
+}
+
+static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
+{
+       struct kvmppc_spapr_tce_table *stt = filp->private_data;
+
+       release_spapr_tce_table(stt);
+       return 0;
+}
+
+static struct file_operations kvm_spapr_tce_fops = {
+       .mmap           = kvm_spapr_tce_mmap,
+       .release        = kvm_spapr_tce_release,
+};
+
+long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
+                                  struct kvm_create_spapr_tce *args)
+{
+       struct kvmppc_spapr_tce_table *stt = NULL;
+       long npages;
+       int ret = -ENOMEM;
+       int i;
+
+       /* Check this LIOBN hasn't been previously allocated */
+       list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
+               if (stt->liobn == args->liobn)
+                       return -EBUSY;
+       }
+
+       npages = kvmppc_stt_npages(args->window_size);
+
+       stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
+                     GFP_KERNEL);
+       if (!stt)
+               goto fail;
+
+       stt->liobn = args->liobn;
+       stt->window_size = args->window_size;
+       stt->kvm = kvm;
+
+       for (i = 0; i < npages; i++) {
+               stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
+               if (!stt->pages[i])
+                       goto fail;
+       }
+
+       kvm_get_kvm(kvm);
+
+       mutex_lock(&kvm->lock);
+       list_add(&stt->list, &kvm->arch.spapr_tce_tables);
+       mutex_unlock(&kvm->lock);
+
+       return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
+                               stt, O_RDWR);
+
+fail:
+       if (stt) {
+               for (i = 0; i < npages; i++)
+                       if (stt->pages[i])
+                               __free_page(stt->pages[i]);
+
+               kfree(stt);
+       }
+       return ret;
+}
@@ -38,6 +38,9 @@
 
 #define TCES_PER_PAGE  (PAGE_SIZE / sizeof(u64))
 
+/* WARNING: This will be called in real-mode on HV KVM and virtual
+ *          mode on PR KVM
+ */
 long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                      unsigned long ioba, unsigned long tce)
 {
......
@@ -87,6 +87,10 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                            unsigned int inst, int *advance)
 {
        int emulated = EMULATE_DONE;
+       int rt = get_rt(inst);
+       int rs = get_rs(inst);
+       int ra = get_ra(inst);
+       int rb = get_rb(inst);
 
        switch (get_op(inst)) {
        case 19:
@@ -106,21 +110,22 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
        case 31:
                switch (get_xop(inst)) {
                case OP_31_XOP_MFMSR:
-                       kvmppc_set_gpr(vcpu, get_rt(inst),
-                                      vcpu->arch.shared->msr);
+                       kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr);
                        break;
                case OP_31_XOP_MTMSRD:
                {
-                       ulong rs = kvmppc_get_gpr(vcpu, get_rs(inst));
+                       ulong rs_val = kvmppc_get_gpr(vcpu, rs);
                        if (inst & 0x10000) {
-                               vcpu->arch.shared->msr &= ~(MSR_RI | MSR_EE);
-                               vcpu->arch.shared->msr |= rs & (MSR_RI | MSR_EE);
+                               ulong new_msr = vcpu->arch.shared->msr;
+                               new_msr &= ~(MSR_RI | MSR_EE);
+                               new_msr |= rs_val & (MSR_RI | MSR_EE);
+                               vcpu->arch.shared->msr = new_msr;
                        } else
-                               kvmppc_set_msr(vcpu, rs);
+                               kvmppc_set_msr(vcpu, rs_val);
                        break;
                }
                case OP_31_XOP_MTMSR:
-                       kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, get_rs(inst)));
+                       kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
                        break;
                case OP_31_XOP_MFSR:
                {
@@ -130,7 +135,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        if (vcpu->arch.mmu.mfsrin) {
                                u32 sr;
                                sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
-                               kvmppc_set_gpr(vcpu, get_rt(inst), sr);
+                               kvmppc_set_gpr(vcpu, rt, sr);
                        }
                        break;
                }
@@ -138,29 +143,29 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                {
                        int srnum;
 
-                       srnum = (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf;
+                       srnum = (kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf;
                        if (vcpu->arch.mmu.mfsrin) {
                                u32 sr;
                                sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
-                               kvmppc_set_gpr(vcpu, get_rt(inst), sr);
+                               kvmppc_set_gpr(vcpu, rt, sr);
                        }
                        break;
                }
                case OP_31_XOP_MTSR:
                        vcpu->arch.mmu.mtsrin(vcpu,
                                (inst >> 16) & 0xf,
-                               kvmppc_get_gpr(vcpu, get_rs(inst)));
+                               kvmppc_get_gpr(vcpu, rs));
                        break;
                case OP_31_XOP_MTSRIN:
                        vcpu->arch.mmu.mtsrin(vcpu,
-                               (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf,
-                               kvmppc_get_gpr(vcpu, get_rs(inst)));
+                               (kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf,
+                               kvmppc_get_gpr(vcpu, rs));
                        break;
                case OP_31_XOP_TLBIE:
                case OP_31_XOP_TLBIEL:
                {
                        bool large = (inst & 0x00200000) ? true : false;
-                       ulong addr = kvmppc_get_gpr(vcpu, get_rb(inst));
+                       ulong addr = kvmppc_get_gpr(vcpu, rb);
                        vcpu->arch.mmu.tlbie(vcpu, addr, large);
                        break;
                }
@@ -171,15 +176,15 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                return EMULATE_FAIL;
 
                        vcpu->arch.mmu.slbmte(vcpu,
-                                       kvmppc_get_gpr(vcpu, get_rs(inst)),
-                                       kvmppc_get_gpr(vcpu, get_rb(inst)));
+                                       kvmppc_get_gpr(vcpu, rs),
+                                       kvmppc_get_gpr(vcpu, rb));
                        break;
                case OP_31_XOP_SLBIE:
                        if (!vcpu->arch.mmu.slbie)
                                return EMULATE_FAIL;
 
                        vcpu->arch.mmu.slbie(vcpu,
-                                       kvmppc_get_gpr(vcpu, get_rb(inst)));
+                                       kvmppc_get_gpr(vcpu, rb));
                        break;
                case OP_31_XOP_SLBIA:
                        if (!vcpu->arch.mmu.slbia)
@@ -191,22 +196,22 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        if (!vcpu->arch.mmu.slbmfee) {
                                emulated = EMULATE_FAIL;
                        } else {
-                               ulong t, rb;
+                               ulong t, rb_val;
 
-                               rb = kvmppc_get_gpr(vcpu, get_rb(inst));
-                               t = vcpu->arch.mmu.slbmfee(vcpu, rb);
-                               kvmppc_set_gpr(vcpu, get_rt(inst), t);
+                               rb_val = kvmppc_get_gpr(vcpu, rb);
+                               t = vcpu->arch.mmu.slbmfee(vcpu, rb_val);
+                               kvmppc_set_gpr(vcpu, rt, t);
                        }
                        break;
                case OP_31_XOP_SLBMFEV:
                        if (!vcpu->arch.mmu.slbmfev) {
                                emulated = EMULATE_FAIL;
                        } else {
-                               ulong t, rb;
+                               ulong t, rb_val;
 
-                               rb = kvmppc_get_gpr(vcpu, get_rb(inst));
-                               t = vcpu->arch.mmu.slbmfev(vcpu, rb);
-                               kvmppc_set_gpr(vcpu, get_rt(inst), t);
+                               rb_val = kvmppc_get_gpr(vcpu, rb);
+                               t = vcpu->arch.mmu.slbmfev(vcpu, rb_val);
+                               kvmppc_set_gpr(vcpu, rt, t);
                        }
                        break;
                case OP_31_XOP_DCBA:
@@ -214,17 +219,17 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        break;
                case OP_31_XOP_DCBZ:
                {
-                       ulong rb = kvmppc_get_gpr(vcpu, get_rb(inst));
-                       ulong ra = 0;
+                       ulong rb_val = kvmppc_get_gpr(vcpu, rb);
+                       ulong ra_val = 0;
                        ulong addr, vaddr;
                        u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
                        u32 dsisr;
                        int r;
 
-                       if (get_ra(inst))
-                               ra = kvmppc_get_gpr(vcpu, get_ra(inst));
+                       if (ra)
+                               ra_val = kvmppc_get_gpr(vcpu, ra);
 
-                       addr = (ra + rb) & ~31ULL;
+                       addr = (ra_val + rb_val) & ~31ULL;
                        if (!(vcpu->arch.shared->msr & MSR_SF))
                                addr &= 0xffffffff;
                        vaddr = addr;
@@ -313,10 +318,9 @@ static struct kvmppc_bat *kvmppc_find_bat(struct kvm_vcpu *vcpu, int sprn)
        return bat;
 }
 
-int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
+int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
 {
        int emulated = EMULATE_DONE;
-       ulong spr_val = kvmppc_get_gpr(vcpu, rs);
 
        switch (sprn) {
        case SPRN_SDR1:
@@ -428,7 +432,7 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
        return emulated;
 }
 
-int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
+int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
 {
        int emulated = EMULATE_DONE;
 
@@ -441,46 +445,46 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
                struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);
 
                if (sprn % 2)
-                       kvmppc_set_gpr(vcpu, rt, bat->raw >> 32);
+                       *spr_val = bat->raw >> 32;
                else
-                       kvmppc_set_gpr(vcpu, rt, bat->raw);
+                       *spr_val = bat->raw;
 
                break;
        }
        case SPRN_SDR1:
                if (!spr_allowed(vcpu, PRIV_HYPER))
                        goto unprivileged;
-               kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->sdr1);
+               *spr_val = to_book3s(vcpu)->sdr1;
                break;
        case SPRN_DSISR:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dsisr);
+               *spr_val = vcpu->arch.shared->dsisr;
                break;
        case SPRN_DAR:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dar);
+               *spr_val = vcpu->arch.shared->dar;
                break;
        case SPRN_HIOR:
-               kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hior);
+               *spr_val = to_book3s(vcpu)->hior;
                break;
        case SPRN_HID0:
-               kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[0]);
+               *spr_val = to_book3s(vcpu)->hid[0];
                break;
        case SPRN_HID1:
-               kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[1]);
+               *spr_val = to_book3s(vcpu)->hid[1];
                break;
        case SPRN_HID2:
        case SPRN_HID2_GEKKO:
-               kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[2]);
+               *spr_val = to_book3s(vcpu)->hid[2];
                break;
        case SPRN_HID4:
        case SPRN_HID4_GEKKO:
-               kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[4]);
+               *spr_val = to_book3s(vcpu)->hid[4];
                break;
        case SPRN_HID5:
-               kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[5]);
+               *spr_val = to_book3s(vcpu)->hid[5];
                break;
        case SPRN_CFAR:
        case SPRN_PURR:
-               kvmppc_set_gpr(vcpu, rt, 0);
+               *spr_val = 0;
                break;
        case SPRN_GQR0:
        case SPRN_GQR1:
@@ -490,8 +494,7 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
        case SPRN_GQR5:
        case SPRN_GQR6:
        case SPRN_GQR7:
-               kvmppc_set_gpr(vcpu, rt,
-                              to_book3s(vcpu)->gqr[sprn - SPRN_GQR0]);
+               *spr_val = to_book3s(vcpu)->gqr[sprn - SPRN_GQR0];
                break;
        case SPRN_THRM1:
        case SPRN_THRM2:
@@ -506,7 +509,7 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
        case SPRN_PMC3_GEKKO:
        case SPRN_PMC4_GEKKO:
        case SPRN_WPAR_GEKKO:
-               kvmppc_set_gpr(vcpu, rt, 0);
+               *spr_val = 0;
                break;
        default:
 unprivileged:
@@ -565,23 +568,22 @@ u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst)
 ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst)
 {
        ulong dar = 0;
-       ulong ra;
+       ulong ra = get_ra(inst);
+       ulong rb = get_rb(inst);
 
        switch (get_op(inst)) {
        case OP_LFS:
        case OP_LFD:
        case OP_STFD:
        case OP_STFS:
-               ra = get_ra(inst);
                if (ra)
                        dar = kvmppc_get_gpr(vcpu, ra);
                dar += (s32)((s16)inst);
                break;
        case 31:
-               ra = get_ra(inst);
                if (ra)
                        dar = kvmppc_get_gpr(vcpu, ra);
-               dar += kvmppc_get_gpr(vcpu, get_rb(inst));
+               dar += kvmppc_get_gpr(vcpu, rb);
                break;
        default:
                printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);
......
@@ -1093,115 +1093,6 @@ int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
        return r;
 }
 
-static long kvmppc_stt_npages(unsigned long window_size)
-{
-       return ALIGN((window_size >> SPAPR_TCE_SHIFT)
-                    * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
-}
-
-static void release_spapr_tce_table(struct kvmppc_spapr_tce_table *stt)
-{
-       struct kvm *kvm = stt->kvm;
-       int i;
-
-       mutex_lock(&kvm->lock);
-       list_del(&stt->list);
-       for (i = 0; i < kvmppc_stt_npages(stt->window_size); i++)
-               __free_page(stt->pages[i]);
-       kfree(stt);
-       mutex_unlock(&kvm->lock);
-
-       kvm_put_kvm(kvm);
-}
-
-static int kvm_spapr_tce_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
-       struct kvmppc_spapr_tce_table *stt = vma->vm_file->private_data;
-       struct page *page;
-
-       if (vmf->pgoff >= kvmppc_stt_npages(stt->window_size))
-               return VM_FAULT_SIGBUS;
-
-       page = stt->pages[vmf->pgoff];
-       get_page(page);
-       vmf->page = page;
-       return 0;
-}
-
-static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
-       .fault = kvm_spapr_tce_fault,
-};
-
-static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
-{
-       vma->vm_ops = &kvm_spapr_tce_vm_ops;
-       return 0;
-}
-
-static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
-{
-       struct kvmppc_spapr_tce_table *stt = filp->private_data;
-
-       release_spapr_tce_table(stt);
-       return 0;
-}
-
-static struct file_operations kvm_spapr_tce_fops = {
-       .mmap           = kvm_spapr_tce_mmap,
-       .release        = kvm_spapr_tce_release,
-};
-
-long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
-                                  struct kvm_create_spapr_tce *args)
-{
-       struct kvmppc_spapr_tce_table *stt = NULL;
-       long npages;
-       int ret = -ENOMEM;
-       int i;
-
-       /* Check this LIOBN hasn't been previously allocated */
-       list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
-               if (stt->liobn == args->liobn)
-                       return -EBUSY;
-       }
-
-       npages = kvmppc_stt_npages(args->window_size);
-
-       stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
-                     GFP_KERNEL);
-       if (!stt)
-               goto fail;
-
-       stt->liobn = args->liobn;
-       stt->window_size = args->window_size;
-       stt->kvm = kvm;
-
-       for (i = 0; i < npages; i++) {
-               stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
-               if (!stt->pages[i])
-                       goto fail;
-       }
-
-       kvm_get_kvm(kvm);
-
-       mutex_lock(&kvm->lock);
-       list_add(&stt->list, &kvm->arch.spapr_tce_tables);
-       mutex_unlock(&kvm->lock);
-
-       return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
-                               stt, O_RDWR);
-
-fail:
-       if (stt) {
-               for (i = 0; i < npages; i++)
-                       if (stt->pages[i])
-                               __free_page(stt->pages[i]);
-
-               kfree(stt);
-       }
-       return ret;
-}
-
 /* Work out RMLS (real mode limit selector) field value for a given RMA size.
    Assumes POWER7 or PPC970. */
@@ -1284,6 +1175,38 @@ long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret)
        return fd;
 }
 
+static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps,
+                                    int linux_psize)
+{
+       struct mmu_psize_def *def = &mmu_psize_defs[linux_psize];
+
+       if (!def->shift)
+               return;
+       (*sps)->page_shift = def->shift;
+       (*sps)->slb_enc = def->sllp;
+       (*sps)->enc[0].page_shift = def->shift;
+       (*sps)->enc[0].pte_enc = def->penc;
+       (*sps)++;
+}
+
+int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
+{
+       struct kvm_ppc_one_seg_page_size *sps;
+
+       info->flags = KVM_PPC_PAGE_SIZES_REAL;
+       if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
+               info->flags |= KVM_PPC_1T_SEGMENTS;
+       info->slb_size = mmu_slb_size;
+
+       /* We only support these sizes for now, and no multi-size segments */
+       sps = &info->sps[0];
+       kvmppc_add_seg_page_size(&sps, MMU_PAGE_4K);
+       kvmppc_add_seg_page_size(&sps, MMU_PAGE_64K);
+       kvmppc_add_seg_page_size(&sps, MMU_PAGE_16M);
+       return 0;
+}
+
 /*
  * Get (and clear) the dirty memory log for a memory slot.
  */
@@ -1582,12 +1505,12 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
        return EMULATE_FAIL;
 }
 
-int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
+int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
 {
        return EMULATE_FAIL;
 }
 
-int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
+int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
 {
        return EMULATE_FAIL;
 }
......
@@ -548,6 +548,9 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
        run->exit_reason = KVM_EXIT_UNKNOWN;
        run->ready_for_interrupt_injection = 1;
 
+       /* We get here with MSR.EE=0, so enable it to be a nice citizen */
+       __hard_irq_enable();
+
        trace_kvm_book3s_exit(exit_nr, vcpu);
        preempt_enable();
        kvm_resched(vcpu);
@@ -1155,6 +1158,31 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
        return r;
 }
 
+#ifdef CONFIG_PPC64
+int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
+{
+       /* No flags */
+       info->flags = 0;
+
+       /* SLB is always 64 entries */
+       info->slb_size = 64;
+
+       /* Standard 4k base page size segment */
+       info->sps[0].page_shift = 12;
+       info->sps[0].slb_enc = 0;
+       info->sps[0].enc[0].page_shift = 12;
+       info->sps[0].enc[0].pte_enc = 0;
+
+       /* Standard 16M large page size segment */
+       info->sps[1].page_shift = 24;
+       info->sps[1].slb_enc = SLB_VSID_L;
+       info->sps[1].enc[0].page_shift = 24;
+       info->sps[1].enc[0].pte_enc = 0;
+
+       return 0;
+}
+#endif /* CONFIG_PPC64 */
+
 int kvmppc_core_prepare_memory_region(struct kvm *kvm,
                                      struct kvm_userspace_memory_region *mem)
 {
@@ -1168,11 +1196,18 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm,
 
 int kvmppc_core_init_vm(struct kvm *kvm)
 {
+#ifdef CONFIG_PPC64
+       INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
+#endif
+
        return 0;
 }
 
 void kvmppc_core_destroy_vm(struct kvm *kvm)
 {
+#ifdef CONFIG_PPC64
+       WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
+#endif
 }
 
 static int kvmppc_book3s_init(void)
......
@@ -15,6 +15,8 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/anon_inodes.h>
+
 #include <asm/uaccess.h>
 #include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
@@ -211,6 +213,20 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
        return EMULATE_DONE;
 }
 
+static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu)
+{
+       unsigned long liobn = kvmppc_get_gpr(vcpu, 4);
+       unsigned long ioba = kvmppc_get_gpr(vcpu, 5);
+       unsigned long tce = kvmppc_get_gpr(vcpu, 6);
+       long rc;
+
+       rc = kvmppc_h_put_tce(vcpu, liobn, ioba, tce);
+       if (rc == H_TOO_HARD)
+               return EMULATE_FAIL;
+       kvmppc_set_gpr(vcpu, 3, rc);
+       return EMULATE_DONE;
+}
+
 int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
 {
        switch (cmd) {
@@ -222,6 +238,8 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
                return kvmppc_h_pr_protect(vcpu);
        case H_BULK_REMOVE:
                return kvmppc_h_pr_bulk_remove(vcpu);
+       case H_PUT_TCE:
+               return kvmppc_h_pr_put_tce(vcpu);
        case H_CEDE:
                kvm_vcpu_block(vcpu);
                clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
......
@@ -128,24 +128,25 @@ no_dcbz32_on:
        /* First clear RI in our current MSR value */
        li      r0, MSR_RI
        andc    r6, r6, r0
-       MTMSR_EERI(r6)
-       mtsrr0  r9
-       mtsrr1  r4
 
        PPC_LL  r0, SVCPU_R0(r3)
        PPC_LL  r1, SVCPU_R1(r3)
        PPC_LL  r2, SVCPU_R2(r3)
-       PPC_LL  r4, SVCPU_R4(r3)
        PPC_LL  r5, SVCPU_R5(r3)
-       PPC_LL  r6, SVCPU_R6(r3)
        PPC_LL  r7, SVCPU_R7(r3)
        PPC_LL  r8, SVCPU_R8(r3)
-       PPC_LL  r9, SVCPU_R9(r3)
        PPC_LL  r10, SVCPU_R10(r3)
        PPC_LL  r11, SVCPU_R11(r3)
        PPC_LL  r12, SVCPU_R12(r3)
        PPC_LL  r13, SVCPU_R13(r3)
 
+       MTMSR_EERI(r6)
+       mtsrr0  r9
+       mtsrr1  r4
+
+       PPC_LL  r4, SVCPU_R4(r3)
+       PPC_LL  r6, SVCPU_R6(r3)
+       PPC_LL  r9, SVCPU_R9(r3)
        PPC_LL  r3, (SVCPU_R3)(r3)
 
        RFI
@@ -197,6 +198,7 @@ kvmppc_interrupt:
        /* Save guest PC and MSR */
 #ifdef CONFIG_PPC64
 BEGIN_FTR_SECTION
+       mr      r10, r12
        andi.   r0,r12,0x2
        beq     1f
        mfspr   r3,SPRN_HSRR0
@@ -316,23 +318,17 @@ no_dcbz32_off:
         * Having set up SRR0/1 with the address where we want
         * to continue with relocation on (potentially in module
         * space), we either just go straight there with rfi[d],
-        * or we jump to an interrupt handler with bctr if there
-        * is an interrupt to be handled first.  In the latter
-        * case, the rfi[d] at the end of the interrupt handler
-        * will get us back to where we want to continue.
+        * or we jump to an interrupt handler if there is an
+        * interrupt to be handled first.  In the latter case,
+        * the rfi[d] at the end of the interrupt handler will
+        * get us back to where we want to continue.
         */
 
-       cmpwi   r12, BOOK3S_INTERRUPT_EXTERNAL
-       beq     1f
-       cmpwi   r12, BOOK3S_INTERRUPT_DECREMENTER
-       beq     1f
-       cmpwi   r12, BOOK3S_INTERRUPT_PERFMON
-1:     mtctr   r12
-
        /* Register usage at this point:
         *
         * R1       = host R1
         * R2       = host R2
+        * R10      = raw exit handler id
        * R12      = exit handler id
        * R13      = shadow vcpu (32-bit) or PACA (64-bit)
        * SVCPU.*  = guest *
@@ -342,12 +338,26 @@ no_dcbz32_off:
        PPC_LL  r6, HSTATE_HOST_MSR(r13)
        PPC_LL  r8, HSTATE_VMHANDLER(r13)
 
-       /* Restore host msr -> SRR1 */
+#ifdef CONFIG_PPC64
+BEGIN_FTR_SECTION
+       andi.   r0,r10,0x2
+       beq     1f
+       mtspr   SPRN_HSRR1, r6
+       mtspr   SPRN_HSRR0, r8
+END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
+#endif
+1:     /* Restore host msr -> SRR1 */
        mtsrr1  r6
        /* Load highmem handler address */
        mtsrr0  r8
 
        /* RFI into the highmem handler, or jump to interrupt handler */
-       beqctr
+       cmpwi   r12, BOOK3S_INTERRUPT_EXTERNAL
+       beqa    BOOK3S_INTERRUPT_EXTERNAL
+       cmpwi   r12, BOOK3S_INTERRUPT_DECREMENTER
+       beqa    BOOK3S_INTERRUPT_DECREMENTER
+       cmpwi   r12, BOOK3S_INTERRUPT_PERFMON
+       beqa    BOOK3S_INTERRUPT_PERFMON
        RFI
 kvmppc_handler_trampoline_exit_end:
@@ -75,8 +75,8 @@ void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits);
 int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                             unsigned int inst, int *advance);
-int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt);
-int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs);
+int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
+int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
 
 /* low-level asm code to transfer guest state */
 void kvmppc_load_guest_spe(struct kvm_vcpu *vcpu);
......
@@ -40,8 +40,8 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                             unsigned int inst, int *advance)
 {
        int emulated = EMULATE_DONE;
-       int rs;
-       int rt;
+       int rs = get_rs(inst);
+       int rt = get_rt(inst);
 
        switch (get_op(inst)) {
        case 19:
@@ -62,19 +62,16 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                switch (get_xop(inst)) {
 
                case OP_31_XOP_MFMSR:
-                       rt = get_rt(inst);
                        kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr);
                        kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS);
                        break;
 
                case OP_31_XOP_MTMSR:
-                       rs = get_rs(inst);
                        kvmppc_set_exit_type(vcpu, EMULATED_MTMSR_EXITS);
                        kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
                        break;
 
                case OP_31_XOP_WRTEE:
-                       rs = get_rs(inst);
                        vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE)
                                        | (kvmppc_get_gpr(vcpu, rs) & MSR_EE);
                        kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS);
@@ -105,22 +102,26 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
  * will return the wrong result if called for them in another context
  * (such as debugging).
  */
-int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
+int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
 {
        int emulated = EMULATE_DONE;
-       ulong spr_val = kvmppc_get_gpr(vcpu, rs);
 
        switch (sprn) {
        case SPRN_DEAR:
-               vcpu->arch.shared->dar = spr_val; break;
+               vcpu->arch.shared->dar = spr_val;
+               break;
        case SPRN_ESR:
-               vcpu->arch.shared->esr = spr_val; break;
+               vcpu->arch.shared->esr = spr_val;
+               break;
        case SPRN_DBCR0:
-               vcpu->arch.dbcr0 = spr_val; break;
+               vcpu->arch.dbcr0 = spr_val;
+               break;
        case SPRN_DBCR1:
-               vcpu->arch.dbcr1 = spr_val; break;
+               vcpu->arch.dbcr1 = spr_val;
+               break;
        case SPRN_DBSR:
-               vcpu->arch.dbsr &= ~spr_val; break;
+               vcpu->arch.dbsr &= ~spr_val;
+               break;
        case SPRN_TSR:
                kvmppc_clr_tsr_bits(vcpu, spr_val);
                break;
@@ -134,13 +135,17 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
         * guest (PR-mode only).
         */
        case SPRN_SPRG4:
-               vcpu->arch.shared->sprg4 = spr_val; break;
+               vcpu->arch.shared->sprg4 = spr_val;
+               break;
        case SPRN_SPRG5:
-               vcpu->arch.shared->sprg5 = spr_val; break;
+               vcpu->arch.shared->sprg5 = spr_val;
+               break;
        case SPRN_SPRG6:
-               vcpu->arch.shared->sprg6 = spr_val; break;
+               vcpu->arch.shared->sprg6 = spr_val;
+               break;
        case SPRN_SPRG7:
-               vcpu->arch.shared->sprg7 = spr_val; break;
+               vcpu->arch.shared->sprg7 = spr_val;
+               break;
 
        case SPRN_IVPR:
                vcpu->arch.ivpr = spr_val;
@@ -210,75 +215,83 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
        return emulated;
 }
 
-int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
+int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
 {
        int emulated = EMULATE_DONE;
 
        switch (sprn) {
        case SPRN_IVPR:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivpr); break;
+               *spr_val = vcpu->arch.ivpr;
+               break;
        case SPRN_DEAR:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dar); break;
+               *spr_val = vcpu->arch.shared->dar;
+               break;
        case SPRN_ESR:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->esr); break;
+               *spr_val = vcpu->arch.shared->esr;
+               break;
        case SPRN_DBCR0:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr0); break;
+               *spr_val = vcpu->arch.dbcr0;
+               break;
        case SPRN_DBCR1:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr1); break;
+               *spr_val = vcpu->arch.dbcr1;
+               break;
        case SPRN_DBSR:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbsr); break;
+               *spr_val = vcpu->arch.dbsr;
+               break;
        case SPRN_TSR:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.tsr); break;
+               *spr_val = vcpu->arch.tsr;
+               break;
        case SPRN_TCR:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.tcr); break;
+               *spr_val = vcpu->arch.tcr;
+               break;
 
        case SPRN_IVOR0:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL]);
+               *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
                break;
        case SPRN_IVOR1:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK]);
+               *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
                break;
        case SPRN_IVOR2:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]);
+               *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
                break;
        case SPRN_IVOR3:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE]);
+               *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
                break;
        case SPRN_IVOR4:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL]);
+               *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
                break;
        case SPRN_IVOR5:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT]);
+               *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
                break;
        case SPRN_IVOR6:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM]);
+               *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
                break;
        case SPRN_IVOR7:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL]);
+               *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
                break;
        case SPRN_IVOR8:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]);
+               *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
                break;
        case SPRN_IVOR9:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL]);
+               *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
                break;
        case SPRN_IVOR10:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER]);
+               *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
                break;
        case SPRN_IVOR11:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_FIT]);
+               *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
                break;
        case SPRN_IVOR12:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG]);
+               *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
                break;
        case SPRN_IVOR13:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS]);
+               *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
                break;
        case SPRN_IVOR14:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS]);
+               *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
                break;
        case SPRN_IVOR15:
-               kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]);
+               *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
                break;
 
        default:
......
@@ -419,13 +419,13 @@ lightweight_exit:
         * written directly to the shared area, so we
         * need to reload them here with the guest's values.
         */
-       lwz     r3, VCPU_SHARED_SPRG4(r5)
+       PPC_LD(r3, VCPU_SHARED_SPRG4, r5)
        mtspr   SPRN_SPRG4W, r3
-       lwz     r3, VCPU_SHARED_SPRG5(r5)
+       PPC_LD(r3, VCPU_SHARED_SPRG5, r5)
        mtspr   SPRN_SPRG5W, r3
-       lwz     r3, VCPU_SHARED_SPRG6(r5)
+       PPC_LD(r3, VCPU_SHARED_SPRG6, r5)
        mtspr   SPRN_SPRG6W, r3
-       lwz     r3, VCPU_SHARED_SPRG7(r5)
+       PPC_LD(r3, VCPU_SHARED_SPRG7, r5)
        mtspr   SPRN_SPRG7W, r3
 
 #ifdef CONFIG_KVM_EXIT_TIMING
......
@@ -87,17 +87,13 @@
        mfspr   r8, SPRN_TBRL
        mfspr   r9, SPRN_TBRU
        cmpw    r9, r7
-       PPC_STL r8, VCPU_TIMING_EXIT_TBL(r4)
+       stw     r8, VCPU_TIMING_EXIT_TBL(r4)
        bne-    1b
-       PPC_STL r9, VCPU_TIMING_EXIT_TBU(r4)
+       stw     r9, VCPU_TIMING_EXIT_TBU(r4)
 #endif
 
        oris    r8, r6, MSR_CE@h
-#ifdef CONFIG_64BIT
-       std     r6, (VCPU_SHARED_MSR)(r11)
-#else
-       stw     r6, (VCPU_SHARED_MSR + 4)(r11)
-#endif
+       PPC_STD(r6, VCPU_SHARED_MSR, r11)
        ori     r8, r8, MSR_ME | MSR_RI
        PPC_STL r5, VCPU_PC(r4)
@@ -220,7 +216,7 @@ _GLOBAL(kvmppc_handler_\intno\()_\srr1)
        PPC_STL r4, VCPU_GPR(r4)(r11)
        PPC_LL  r4, THREAD_NORMSAVE(0)(r10)
        PPC_STL r5, VCPU_GPR(r5)(r11)
-       PPC_STL r13, VCPU_CR(r11)
+       stw     r13, VCPU_CR(r11)
        mfspr   r5, \srr0
        PPC_STL r3, VCPU_GPR(r10)(r11)
        PPC_LL  r3, THREAD_NORMSAVE(2)(r10)
@@ -247,7 +243,7 @@ _GLOBAL(kvmppc_handler_\intno\()_\srr1)
        PPC_STL r4, VCPU_GPR(r4)(r11)
        PPC_LL  r4, GPR9(r8)
        PPC_STL r5, VCPU_GPR(r5)(r11)
-       PPC_STL r9, VCPU_CR(r11)
+       stw     r9, VCPU_CR(r11)
        mfspr   r5, \srr0
        PPC_STL r3, VCPU_GPR(r8)(r11)
        PPC_LL  r3, GPR10(r8)
@@ -256,10 +252,10 @@ _GLOBAL(kvmppc_handler_\intno\()_\srr1)
        mfspr   r6, \srr1
        PPC_LL  r4, GPR11(r8)
        PPC_STL r7, VCPU_GPR(r7)(r11)
-       PPC_STL r8, VCPU_GPR(r8)(r11)
        PPC_STL r3, VCPU_GPR(r10)(r11)
        mfctr   r7
        PPC_STL r12, VCPU_GPR(r12)(r11)
+       PPC_STL r13, VCPU_GPR(r13)(r11)
        PPC_STL r4, VCPU_GPR(r11)(r11)
        PPC_STL r7, VCPU_CTR(r11)
        mr      r4, r11
@@ -319,14 +315,14 @@ _GLOBAL(kvmppc_resume_host)
        mfspr   r6, SPRN_SPRG4
        PPC_STL r5, VCPU_LR(r4)
        mfspr   r7, SPRN_SPRG5
-       PPC_STL r3, VCPU_VRSAVE(r4)
-       PPC_STL r6, VCPU_SHARED_SPRG4(r11)
+       stw     r3, VCPU_VRSAVE(r4)
+       PPC_STD(r6, VCPU_SHARED_SPRG4, r11)
        mfspr   r8, SPRN_SPRG6
-       PPC_STL r7, VCPU_SHARED_SPRG5(r11)
+       PPC_STD(r7, VCPU_SHARED_SPRG5, r11)
        mfspr   r9, SPRN_SPRG7
-       PPC_STL r8, VCPU_SHARED_SPRG6(r11)
+       PPC_STD(r8, VCPU_SHARED_SPRG6, r11)
        mfxer   r3
-       PPC_STL r9, VCPU_SHARED_SPRG7(r11)
+       PPC_STD(r9, VCPU_SHARED_SPRG7, r11)
 
        /* save guest MAS registers and restore host mas4 & mas6 */
        mfspr   r5, SPRN_MAS0
@@ -335,11 +331,7 @@ _GLOBAL(kvmppc_resume_host)
        stw     r5, VCPU_SHARED_MAS0(r11)
        mfspr   r7, SPRN_MAS2
        stw     r6, VCPU_SHARED_MAS1(r11)
-#ifdef CONFIG_64BIT
-       std     r7, (VCPU_SHARED_MAS2)(r11)
-#else
-       stw     r7, (VCPU_SHARED_MAS2 + 4)(r11)
-#endif
+       PPC_STD(r7, VCPU_SHARED_MAS2, r11)
        mfspr   r5, SPRN_MAS3
        mfspr   r6, SPRN_MAS4
        stw     r5, VCPU_SHARED_MAS7_3+4(r11)
@@ -527,11 +519,7 @@ lightweight_exit:
        stw     r3, VCPU_HOST_MAS6(r4)
        lwz     r3, VCPU_SHARED_MAS0(r11)
        lwz     r5, VCPU_SHARED_MAS1(r11)
-#ifdef CONFIG_64BIT
-       ld      r6, (VCPU_SHARED_MAS2)(r11)
-#else
-       lwz     r6, (VCPU_SHARED_MAS2 + 4)(r11)
-#endif
+       PPC_LD(r6, VCPU_SHARED_MAS2, r11)
        lwz     r7, VCPU_SHARED_MAS7_3+4(r11)
        lwz     r8, VCPU_SHARED_MAS4(r11)
        mtspr   SPRN_MAS0, r3
@@ -549,13 +537,13 @@ lightweight_exit:
         * SPRGs, so we need to reload them here with the guest's values.
         */
        lwz     r3, VCPU_VRSAVE(r4)
-       lwz     r5, VCPU_SHARED_SPRG4(r11)
+       PPC_LD(r5, VCPU_SHARED_SPRG4, r11)
        mtspr   SPRN_VRSAVE, r3
-       lwz     r6, VCPU_SHARED_SPRG5(r11)
+       PPC_LD(r6, VCPU_SHARED_SPRG5, r11)
        mtspr   SPRN_SPRG4W, r5
-       lwz     r7, VCPU_SHARED_SPRG6(r11)
+       PPC_LD(r7, VCPU_SHARED_SPRG6, r11)
        mtspr   SPRN_SPRG5W, r6
-       lwz     r8, VCPU_SHARED_SPRG7(r11)
+       PPC_LD(r8, VCPU_SHARED_SPRG7, r11)
        mtspr   SPRN_SPRG6W, r7
        mtspr   SPRN_SPRG7W, r8
@@ -563,13 +551,9 @@ lightweight_exit:
        PPC_LL  r3, VCPU_LR(r4)
        PPC_LL  r5, VCPU_XER(r4)
        PPC_LL  r6, VCPU_CTR(r4)
-       PPC_LL  r7, VCPU_CR(r4)
+       lwz     r7, VCPU_CR(r4)
        PPC_LL  r8, VCPU_PC(r4)
-#ifdef CONFIG_64BIT
-       ld      r9, (VCPU_SHARED_MSR)(r11)
-#else
-       lwz     r9, (VCPU_SHARED_MSR + 4)(r11)
-#endif
+       PPC_LD(r9, VCPU_SHARED_MSR, r11)
        PPC_LL  r0, VCPU_GPR(r0)(r4)
        PPC_LL  r1, VCPU_GPR(r1)(r4)
        PPC_LL  r2, VCPU_GPR(r2)(r4)
@@ -590,9 +574,9 @@ lightweight_exit:
        mfspr   r9, SPRN_TBRL
        mfspr   r8, SPRN_TBRU
        cmpw    r8, r6
-       PPC_STL r9, VCPU_TIMING_LAST_ENTER_TBL(r4)
+       stw     r9, VCPU_TIMING_LAST_ENTER_TBL(r4)
        bne     1b
-       PPC_STL r8, VCPU_TIMING_LAST_ENTER_TBU(r4)
+       stw     r8, VCPU_TIMING_LAST_ENTER_TBU(r4)
 #endif
 
        /*
......
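[Editor's note] The PPC_STD()/PPC_LD() macros introduced above replace the open-coded #ifdef CONFIG_64BIT blocks for accessing the 64-bit fields of the shared struct. Judging from the blocks they replace, they presumably expand along these lines (a sketch of the assumed definitions, not the verbatim kernel header):

/*
 * Sketch only: assumed shape of the PPC_STD/PPC_LD helpers, inferred from
 * the #ifdef blocks removed in the hunks above. The shared-struct fields
 * are 64 bits wide; a 32-bit host touches only the low word, which on
 * big-endian PowerPC sits at offset + 4.
 */
#ifdef CONFIG_64BIT
#define PPC_STD(reg, offs, base)	std	reg, (offs)(base)
#define PPC_LD(reg, offs, base)		ld	reg, (offs)(base)
#else
#define PPC_STD(reg, offs, base)	stw	reg, ((offs) + 4)(base)
#define PPC_LD(reg, offs, base)		lwz	reg, ((offs) + 4)(base)
#endif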
@@ -89,6 +89,10 @@ struct kvmppc_vcpu_e500 {
 	u64 *g2h_tlb1_map;
 	unsigned int *h2g_tlb1_rmap;
 
+	/* Minimum and maximum address mapped by TLB1 */
+	unsigned long tlb1_min_eaddr;
+	unsigned long tlb1_max_eaddr;
+
 #ifdef CONFIG_KVM_E500V2
 	u32 pid[E500_PID_NUM];
...
@@ -86,9 +86,9 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			       unsigned int inst, int *advance)
 {
 	int emulated = EMULATE_DONE;
-	int ra;
-	int rb;
-	int rt;
+	int ra = get_ra(inst);
+	int rb = get_rb(inst);
+	int rt = get_rt(inst);
 
 	switch (get_op(inst)) {
 	case 31:
@@ -96,11 +96,11 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 #ifdef CONFIG_KVM_E500MC
 	case XOP_MSGSND:
-		emulated = kvmppc_e500_emul_msgsnd(vcpu, get_rb(inst));
+		emulated = kvmppc_e500_emul_msgsnd(vcpu, rb);
 		break;
 
 	case XOP_MSGCLR:
-		emulated = kvmppc_e500_emul_msgclr(vcpu, get_rb(inst));
+		emulated = kvmppc_e500_emul_msgclr(vcpu, rb);
 		break;
 #endif
@@ -113,20 +113,14 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		break;
 
 	case XOP_TLBSX:
-		rb = get_rb(inst);
 		emulated = kvmppc_e500_emul_tlbsx(vcpu,rb);
 		break;
 
 	case XOP_TLBILX:
-		ra = get_ra(inst);
-		rb = get_rb(inst);
-		rt = get_rt(inst);
 		emulated = kvmppc_e500_emul_tlbilx(vcpu, rt, ra, rb);
 		break;
 
 	case XOP_TLBIVAX:
-		ra = get_ra(inst);
-		rb = get_rb(inst);
 		emulated = kvmppc_e500_emul_tlbivax(vcpu, ra, rb);
 		break;
@@ -146,11 +140,10 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	return emulated;
 }
 
-int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
+int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
 {
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
 	int emulated = EMULATE_DONE;
-	ulong spr_val = kvmppc_get_gpr(vcpu, rs);
 
 	switch (sprn) {
 #ifndef CONFIG_KVM_BOOKE_HV
@@ -160,25 +153,32 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
 	case SPRN_PID1:
 		if (spr_val != 0)
 			return EMULATE_FAIL;
-		vcpu_e500->pid[1] = spr_val; break;
+		vcpu_e500->pid[1] = spr_val;
+		break;
 	case SPRN_PID2:
 		if (spr_val != 0)
 			return EMULATE_FAIL;
-		vcpu_e500->pid[2] = spr_val; break;
+		vcpu_e500->pid[2] = spr_val;
+		break;
 	case SPRN_MAS0:
-		vcpu->arch.shared->mas0 = spr_val; break;
+		vcpu->arch.shared->mas0 = spr_val;
+		break;
 	case SPRN_MAS1:
-		vcpu->arch.shared->mas1 = spr_val; break;
+		vcpu->arch.shared->mas1 = spr_val;
+		break;
 	case SPRN_MAS2:
-		vcpu->arch.shared->mas2 = spr_val; break;
+		vcpu->arch.shared->mas2 = spr_val;
+		break;
 	case SPRN_MAS3:
 		vcpu->arch.shared->mas7_3 &= ~(u64)0xffffffff;
 		vcpu->arch.shared->mas7_3 |= spr_val;
 		break;
 	case SPRN_MAS4:
-		vcpu->arch.shared->mas4 = spr_val; break;
+		vcpu->arch.shared->mas4 = spr_val;
+		break;
 	case SPRN_MAS6:
-		vcpu->arch.shared->mas6 = spr_val; break;
+		vcpu->arch.shared->mas6 = spr_val;
+		break;
 	case SPRN_MAS7:
 		vcpu->arch.shared->mas7_3 &= (u64)0xffffffff;
 		vcpu->arch.shared->mas7_3 |= (u64)spr_val << 32;
@@ -189,11 +189,14 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
 		vcpu_e500->l1csr0 &= ~(L1CSR0_DCFI | L1CSR0_CLFC);
 		break;
 	case SPRN_L1CSR1:
-		vcpu_e500->l1csr1 = spr_val; break;
+		vcpu_e500->l1csr1 = spr_val;
+		break;
 	case SPRN_HID0:
-		vcpu_e500->hid0 = spr_val; break;
+		vcpu_e500->hid0 = spr_val;
+		break;
 	case SPRN_HID1:
-		vcpu_e500->hid1 = spr_val; break;
+		vcpu_e500->hid1 = spr_val;
+		break;
 	case SPRN_MMUCSR0:
 		emulated = kvmppc_e500_emul_mt_mmucsr0(vcpu_e500,
@@ -222,90 +225,103 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
 		break;
 #endif
 	default:
-		emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, rs);
+		emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, spr_val);
 	}
 
 	return emulated;
 }
 
-int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
+int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
 {
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
 	int emulated = EMULATE_DONE;
 
 	switch (sprn) {
 #ifndef CONFIG_KVM_BOOKE_HV
-	unsigned long val;
-
 	case SPRN_PID:
-		kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[0]); break;
+		*spr_val = vcpu_e500->pid[0];
+		break;
 	case SPRN_PID1:
-		kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[1]); break;
+		*spr_val = vcpu_e500->pid[1];
+		break;
 	case SPRN_PID2:
-		kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[2]); break;
+		*spr_val = vcpu_e500->pid[2];
+		break;
 	case SPRN_MAS0:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas0); break;
+		*spr_val = vcpu->arch.shared->mas0;
+		break;
 	case SPRN_MAS1:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas1); break;
+		*spr_val = vcpu->arch.shared->mas1;
+		break;
 	case SPRN_MAS2:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas2); break;
+		*spr_val = vcpu->arch.shared->mas2;
+		break;
 	case SPRN_MAS3:
-		val = (u32)vcpu->arch.shared->mas7_3;
-		kvmppc_set_gpr(vcpu, rt, val);
+		*spr_val = (u32)vcpu->arch.shared->mas7_3;
 		break;
 	case SPRN_MAS4:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas4); break;
+		*spr_val = vcpu->arch.shared->mas4;
+		break;
 	case SPRN_MAS6:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas6); break;
+		*spr_val = vcpu->arch.shared->mas6;
+		break;
 	case SPRN_MAS7:
-		val = vcpu->arch.shared->mas7_3 >> 32;
-		kvmppc_set_gpr(vcpu, rt, val);
+		*spr_val = vcpu->arch.shared->mas7_3 >> 32;
 		break;
 #endif
 	case SPRN_TLB0CFG:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.tlbcfg[0]); break;
+		*spr_val = vcpu->arch.tlbcfg[0];
+		break;
 	case SPRN_TLB1CFG:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.tlbcfg[1]); break;
+		*spr_val = vcpu->arch.tlbcfg[1];
+		break;
 	case SPRN_L1CSR0:
-		kvmppc_set_gpr(vcpu, rt, vcpu_e500->l1csr0); break;
+		*spr_val = vcpu_e500->l1csr0;
+		break;
 	case SPRN_L1CSR1:
-		kvmppc_set_gpr(vcpu, rt, vcpu_e500->l1csr1); break;
+		*spr_val = vcpu_e500->l1csr1;
+		break;
 	case SPRN_HID0:
-		kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid0); break;
+		*spr_val = vcpu_e500->hid0;
+		break;
 	case SPRN_HID1:
-		kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid1); break;
+		*spr_val = vcpu_e500->hid1;
+		break;
 	case SPRN_SVR:
-		kvmppc_set_gpr(vcpu, rt, vcpu_e500->svr); break;
+		*spr_val = vcpu_e500->svr;
+		break;
 	case SPRN_MMUCSR0:
-		kvmppc_set_gpr(vcpu, rt, 0); break;
+		*spr_val = 0;
+		break;
 	case SPRN_MMUCFG:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.mmucfg); break;
+		*spr_val = vcpu->arch.mmucfg;
+		break;
 
 	/* extra exceptions */
 	case SPRN_IVOR32:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]);
+		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
 		break;
 	case SPRN_IVOR33:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA]);
+		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA];
 		break;
 	case SPRN_IVOR34:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND]);
+		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND];
 		break;
 	case SPRN_IVOR35:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR]);
+		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];
 		break;
 #ifdef CONFIG_KVM_BOOKE_HV
 	case SPRN_IVOR36:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL]);
+		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL];
 		break;
 	case SPRN_IVOR37:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT]);
+		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT];
 		break;
 #endif
 	default:
-		emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt);
+		emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, spr_val);
 	}
 
 	return emulated;
...
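[Editor's note] The signature change above is the heart of the SPR cleanup: kvmppc_core_emulate_mtspr()/mfspr() now take the SPR value itself (or a pointer to it) rather than a GPR index, so guest GPR access can happen exactly once in the generic emulator instead of in every case arm. A minimal sketch of what the caller side can now look like; emulate_mfspr()/emulate_mtspr() are hypothetical helper names for illustration, not the kernel's own functions:

/*
 * Sketch (assumed shape of the generic emulator after this cleanup):
 * the common code does the GPR read/write once, and the per-core hook
 * only deals with the SPR value.
 */
static int emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
{
	ulong spr_val;
	int emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, &spr_val);

	if (emulated == EMULATE_DONE)
		kvmppc_set_gpr(vcpu, rt, spr_val);
	return emulated;
}

static int emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
{
	return kvmppc_core_emulate_mtspr(vcpu, sprn,
					 kvmppc_get_gpr(vcpu, rs));
}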
@@ -261,6 +261,9 @@ static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
 		set_base = gtlb0_set_base(vcpu_e500, eaddr);
 		size = vcpu_e500->gtlb_params[0].ways;
 	} else {
+		if (eaddr < vcpu_e500->tlb1_min_eaddr ||
+		    eaddr > vcpu_e500->tlb1_max_eaddr)
+			return -1;
 		set_base = 0;
 	}
@@ -583,6 +586,65 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 	return victim;
 }
 
+static void kvmppc_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+	int size = vcpu_e500->gtlb_params[1].entries;
+	unsigned int offset;
+	gva_t eaddr;
+	int i;
+
+	vcpu_e500->tlb1_min_eaddr = ~0UL;
+	vcpu_e500->tlb1_max_eaddr = 0;
+	offset = vcpu_e500->gtlb_offset[1];
+
+	for (i = 0; i < size; i++) {
+		struct kvm_book3e_206_tlb_entry *tlbe =
+			&vcpu_e500->gtlb_arch[offset + i];
+
+		if (!get_tlb_v(tlbe))
+			continue;
+
+		eaddr = get_tlb_eaddr(tlbe);
+		vcpu_e500->tlb1_min_eaddr =
+				min(vcpu_e500->tlb1_min_eaddr, eaddr);
+
+		eaddr = get_tlb_end(tlbe);
+		vcpu_e500->tlb1_max_eaddr =
+				max(vcpu_e500->tlb1_max_eaddr, eaddr);
+	}
+}
+
+static int kvmppc_need_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500,
+				struct kvm_book3e_206_tlb_entry *gtlbe)
+{
+	unsigned long start, end, size;
+
+	size = get_tlb_bytes(gtlbe);
+	start = get_tlb_eaddr(gtlbe) & ~(size - 1);
+	end = start + size - 1;
+
+	return vcpu_e500->tlb1_min_eaddr == start ||
+	       vcpu_e500->tlb1_max_eaddr == end;
+}
+
+/* This function is supposed to be called for adding a new valid tlb entry */
+static void kvmppc_set_tlb1map_range(struct kvm_vcpu *vcpu,
+				struct kvm_book3e_206_tlb_entry *gtlbe)
+{
+	unsigned long start, end, size;
+	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+
+	if (!get_tlb_v(gtlbe))
+		return;
+
+	size = get_tlb_bytes(gtlbe);
+	start = get_tlb_eaddr(gtlbe) & ~(size - 1);
+	end = start + size - 1;
+
+	vcpu_e500->tlb1_min_eaddr = min(vcpu_e500->tlb1_min_eaddr, start);
+	vcpu_e500->tlb1_max_eaddr = max(vcpu_e500->tlb1_max_eaddr, end);
+}
+
 static inline int kvmppc_e500_gtlbe_invalidate(
 				struct kvmppc_vcpu_e500 *vcpu_e500,
 				int tlbsel, int esel)
@@ -593,6 +655,9 @@ static inline int kvmppc_e500_gtlbe_invalidate(
 	if (unlikely(get_tlb_iprot(gtlbe)))
 		return -1;
 
+	if (tlbsel == 1 && kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe))
+		kvmppc_recalc_tlb1map_range(vcpu_e500);
+
 	gtlbe->mas1 = 0;
 
 	return 0;
@@ -792,14 +857,19 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
 	struct kvm_book3e_206_tlb_entry *gtlbe, stlbe;
 	int tlbsel, esel, stlbsel, sesel;
+	int recal = 0;
 
 	tlbsel = get_tlb_tlbsel(vcpu);
 	esel = get_tlb_esel(vcpu, tlbsel);
 
 	gtlbe = get_entry(vcpu_e500, tlbsel, esel);
 
-	if (get_tlb_v(gtlbe))
+	if (get_tlb_v(gtlbe)) {
 		inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
+		if ((tlbsel == 1) &&
+			kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe))
+			recal = 1;
+	}
 
 	gtlbe->mas1 = vcpu->arch.shared->mas1;
 	gtlbe->mas2 = vcpu->arch.shared->mas2;
@@ -808,6 +878,18 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
 	trace_kvm_booke206_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1,
 				      gtlbe->mas2, gtlbe->mas7_3);
 
+	if (tlbsel == 1) {
+		/*
+		 * If a valid tlb1 entry is overwritten then recalculate the
+		 * min/max TLB1 map address range; otherwise there is no need
+		 * to scan the tlb1 array.
+		 */
+		if (recal)
+			kvmppc_recalc_tlb1map_range(vcpu_e500);
+		else
+			kvmppc_set_tlb1map_range(vcpu, gtlbe);
+	}
+
 	/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
 	if (tlbe_is_host_safe(vcpu, gtlbe)) {
 		u64 eaddr;
@@ -1145,6 +1227,7 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
 	vcpu_e500->gtlb_params[1].ways = params.tlb_sizes[1];
 	vcpu_e500->gtlb_params[1].sets = 1;
 
+	kvmppc_recalc_tlb1map_range(vcpu_e500);
 	return 0;
 
 err_put_page:
@@ -1163,7 +1246,7 @@ int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
 			     struct kvm_dirty_tlb *dirty)
 {
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
-
+	kvmppc_recalc_tlb1map_range(vcpu_e500);
 	clear_tlb_refs(vcpu_e500);
 	return 0;
 }
@@ -1272,6 +1355,7 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
 	vcpu->arch.tlbcfg[1] |=
 		vcpu_e500->gtlb_params[1].ways << TLBnCFG_ASSOC_SHIFT;
 
+	kvmppc_recalc_tlb1map_range(vcpu_e500);
 	return 0;
 
 err:
...
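[Editor's note] The TLB1 hunks above cache the lowest and highest guest effective address mapped by any valid TLB1 entry, so kvmppc_e500_tlb_index() can reject a lookup outside that window without walking the fully associative TLB1 array; the bounds only need a full rebuild when an entry sitting on a cached boundary is invalidated or overwritten. A self-contained toy sketch of that invariant (illustration only, not kernel code):

/*
 * Toy illustration: the cached [min, max] bounds always cover every
 * valid mapping, so a miss outside them can skip the linear scan.
 */
#include <stdio.h>

struct range { unsigned long min, max; };

/* widen the cached bounds when a mapping [start, end] is inserted */
static void range_add(struct range *r, unsigned long start, unsigned long end)
{
	if (start < r->min)
		r->min = start;
	if (end > r->max)
		r->max = end;
}

int main(void)
{
	struct range r = { ~0UL, 0 };	/* empty: min > max */
	unsigned long eaddr = 0x30000000;

	range_add(&r, 0x10000000, 0x10ffffff);	/* 16M mapping */
	range_add(&r, 0x20000000, 0x203fffff);	/*  4M mapping */

	if (eaddr < r.min || eaddr > r.max)
		printf("0x%lx outside [0x%lx, 0x%lx]: skip TLB1 scan\n",
		       eaddr, r.min, r.max);
	return 0;
}

Note the asymmetry in the kernel code: inserting an entry can only widen the bounds (kvmppc_set_tlb1map_range), but removing a boundary entry may shrink them, which is why invalidation paths fall back to the full kvmppc_recalc_tlb1map_range() scan.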
@@ -244,10 +244,12 @@ int kvm_dev_ioctl_check_extension(long ext)
 		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
 		break;
 #endif
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_PPC_BOOK3S_64
 	case KVM_CAP_SPAPR_TCE:
 		r = 1;
 		break;
+#endif /* CONFIG_PPC_BOOK3S_64 */
+#ifdef CONFIG_KVM_BOOK3S_64_HV
 	case KVM_CAP_PPC_SMT:
 		r = threads_per_core;
 		break;
@@ -277,6 +279,11 @@ int kvm_dev_ioctl_check_extension(long ext)
 	case KVM_CAP_MAX_VCPUS:
 		r = KVM_MAX_VCPUS;
 		break;
+#ifdef CONFIG_PPC_BOOK3S_64
+	case KVM_CAP_PPC_GET_SMMU_INFO:
+		r = 1;
+		break;
+#endif
 	default:
 		r = 0;
 		break;
@@ -716,7 +723,6 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		break;
 	}
 #endif
-
 	default:
 		r = -EINVAL;
 	}
@@ -773,7 +779,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		break;
 	}
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_PPC_BOOK3S_64
 	case KVM_CREATE_SPAPR_TCE: {
 		struct kvm_create_spapr_tce create_tce;
 		struct kvm *kvm = filp->private_data;
@@ -784,7 +790,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
 		goto out;
 	}
+#endif /* CONFIG_PPC_BOOK3S_64 */
 
+#ifdef CONFIG_KVM_BOOK3S_64_HV
 	case KVM_ALLOCATE_RMA: {
 		struct kvm *kvm = filp->private_data;
 		struct kvm_allocate_rma rma;
@@ -796,6 +804,18 @@ long kvm_arch_vm_ioctl(struct file *filp,
 	}
 #endif /* CONFIG_KVM_BOOK3S_64_HV */
 
+#ifdef CONFIG_PPC_BOOK3S_64
+	case KVM_PPC_GET_SMMU_INFO: {
+		struct kvm *kvm = filp->private_data;
+		struct kvm_ppc_smmu_info info;
+
+		memset(&info, 0, sizeof(info));
+		r = kvm_vm_ioctl_get_smmu_info(kvm, &info);
+		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
+			r = -EFAULT;
+		break;
+	}
+#endif /* CONFIG_PPC_BOOK3S_64 */
 	default:
 		r = -ENOTTY;
 	}
...
@@ -449,6 +449,30 @@ struct kvm_ppc_pvinfo {
 	__u8  pad[108];
 };
 
+/* for KVM_PPC_GET_SMMU_INFO */
+#define KVM_PPC_PAGE_SIZES_MAX_SZ	8
+
+struct kvm_ppc_one_page_size {
+	__u32 page_shift;	/* Page shift (or 0) */
+	__u32 pte_enc;		/* Encoding in the HPTE (>>12) */
+};
+
+struct kvm_ppc_one_seg_page_size {
+	__u32 page_shift;	/* Base page shift of segment (or 0) */
+	__u32 slb_enc;		/* SLB encoding for BookS */
+	struct kvm_ppc_one_page_size enc[KVM_PPC_PAGE_SIZES_MAX_SZ];
+};
+
+#define KVM_PPC_PAGE_SIZES_REAL		0x00000001
+#define KVM_PPC_1T_SEGMENTS		0x00000002
+
+struct kvm_ppc_smmu_info {
+	__u64 flags;
+	__u32 slb_size;
+	__u32 pad;
+	struct kvm_ppc_one_seg_page_size sps[KVM_PPC_PAGE_SIZES_MAX_SZ];
+};
+
 #define KVMIO 0xAE
 
 /* machine type bits, to be used as argument to KVM_CREATE_VM */
@@ -591,6 +615,7 @@ struct kvm_ppc_pvinfo {
 #define KVM_CAP_PCI_2_3 75
 #define KVM_CAP_KVMCLOCK_CTRL 76
 #define KVM_CAP_SIGNAL_MSI 77
+#define KVM_CAP_PPC_GET_SMMU_INFO 78
 
 #ifdef KVM_CAP_IRQ_ROUTING
@@ -800,6 +825,8 @@ struct kvm_s390_ucas_mapping {
 					struct kvm_assigned_pci_dev)
 /* Available with KVM_CAP_SIGNAL_MSI */
 #define KVM_SIGNAL_MSI            _IOW(KVMIO,  0xa5, struct kvm_msi)
+/* Available with KVM_CAP_PPC_GET_SMMU_INFO */
+#define KVM_PPC_GET_SMMU_INFO	  _IOR(KVMIO,  0xa6, struct kvm_ppc_smmu_info)
 
 /*
  * ioctls for vcpu fds
...
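[Editor's note] With the uapi additions above, userspace queries the new vm ioctl roughly as follows. A minimal sketch assuming a <linux/kvm.h> that already carries these definitions; error handling is trimmed to the bare minimum:

/*
 * Query the server MMU info for a VM and print the supported segment
 * base page sizes.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	int vm = ioctl(kvm, KVM_CREATE_VM, 0);
	struct kvm_ppc_smmu_info info;
	int i;

	/* check the capability first, as the API documentation requires */
	if (ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_PPC_GET_SMMU_INFO) <= 0)
		return 1;

	if (ioctl(vm, KVM_PPC_GET_SMMU_INFO, &info) < 0)
		return 1;

	printf("slb_size: %u, 1T segments: %s\n", info.slb_size,
	       (info.flags & KVM_PPC_1T_SEGMENTS) ? "yes" : "no");

	/* sps[] is sorted by increasing size; a zero page_shift terminates */
	for (i = 0; i < KVM_PPC_PAGE_SIZES_MAX_SZ; i++) {
		if (!info.sps[i].page_shift)
			break;
		printf("base page shift %u (slb_enc 0x%x)\n",
		       info.sps[i].page_shift, info.sps[i].slb_enc);
	}
	return 0;
}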