Commit 715958f9 authored by Radim Krčmář

Merge tag 'kvm_mips_4.12_1' of git://git.kernel.org/pub/scm/linux/kernel/git/jhogan/kvm-mips

From: James Hogan <james.hogan@imgtec.com>

KVM: MIPS: VZ support, Octeon III, and TLBR

Add basic support for the MIPS Virtualization Module (generally known as
MIPS VZ) in KVM. We primarily support the ImgTec P5600, P6600, I6400,
and Cavium Octeon III cores so far. Support is included for the
following VZ / guest hardware features:
- MIPS32 and MIPS64, r5 (VZ requires r5 or later) and r6
- TLBs with GuestID (IMG cores) or Root ASID Dealias (Octeon III)
- Shared physical root/guest TLB (IMG cores)
- FPU / MSA
- Cop0 timer (up to 1GHz for now due to soft timer limit)
- Segmentation control (EVA)
- Hardware page table walker (HTW) both for root and guest TLB

Also included is a proper implementation of the TLBR instruction for the
trap & emulate MIPS KVM implementation.

Preliminary MIPS architecture changes are applied directly with Ralf's
ack.
parents e55fe3cc dc44abd6
@@ -115,12 +115,17 @@ will access the virtual machine's physical address space; offset zero
 corresponds to guest physical address zero. Use of mmap() on a VM fd
 is discouraged if userspace memory allocation (KVM_CAP_USER_MEMORY) is
 available.
-You most certainly want to use 0 as machine type.
+You probably want to use 0 as machine type.
 
 In order to create user controlled virtual machines on S390, check
 KVM_CAP_S390_UCONTROL and use the flag KVM_VM_S390_UCONTROL as
 privileged user (CAP_SYS_ADMIN).
 
+To use hardware assisted virtualization on MIPS (VZ ASE) rather than
+the default trap & emulate implementation (which changes the virtual
+memory layout to fit in user mode), check KVM_CAP_MIPS_VZ and use the
+flag KVM_VM_MIPS_VZ.
+
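As an illustration of the new machine type, a minimal userspace sequence might
look as follows (a sketch only: error handling is omitted, and whether to fall
back to trap & emulate is up to the application):

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int create_mips_vm(void)
    {
            int kvm = open("/dev/kvm", O_RDWR);
            int type = 0;      /* default: trap & emulate (KVM_VM_MIPS_TE) */

            /* Prefer full hardware assisted virtualization if available */
            if (ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_MIPS_VZ) == 1)
                    type = KVM_VM_MIPS_VZ;

            return ioctl(kvm, KVM_CREATE_VM, type);
    }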
 4.3 KVM_GET_MSR_INDEX_LIST
@@ -2068,11 +2073,23 @@ registers, find a list below:
 MIPS  | KVM_REG_MIPS_CP0_ENTRYLO0     | 64
 MIPS  | KVM_REG_MIPS_CP0_ENTRYLO1     | 64
 MIPS  | KVM_REG_MIPS_CP0_CONTEXT      | 64
+MIPS  | KVM_REG_MIPS_CP0_CONTEXTCONFIG| 32
 MIPS  | KVM_REG_MIPS_CP0_USERLOCAL    | 64
+MIPS  | KVM_REG_MIPS_CP0_XCONTEXTCONFIG| 64
 MIPS  | KVM_REG_MIPS_CP0_PAGEMASK     | 32
+MIPS  | KVM_REG_MIPS_CP0_PAGEGRAIN    | 32
+MIPS  | KVM_REG_MIPS_CP0_SEGCTL0      | 64
+MIPS  | KVM_REG_MIPS_CP0_SEGCTL1      | 64
+MIPS  | KVM_REG_MIPS_CP0_SEGCTL2      | 64
+MIPS  | KVM_REG_MIPS_CP0_PWBASE       | 64
+MIPS  | KVM_REG_MIPS_CP0_PWFIELD      | 64
+MIPS  | KVM_REG_MIPS_CP0_PWSIZE       | 64
 MIPS  | KVM_REG_MIPS_CP0_WIRED        | 32
+MIPS  | KVM_REG_MIPS_CP0_PWCTL        | 32
 MIPS  | KVM_REG_MIPS_CP0_HWRENA       | 32
 MIPS  | KVM_REG_MIPS_CP0_BADVADDR     | 64
+MIPS  | KVM_REG_MIPS_CP0_BADINSTR     | 32
+MIPS  | KVM_REG_MIPS_CP0_BADINSTRP    | 32
 MIPS  | KVM_REG_MIPS_CP0_COUNT        | 32
 MIPS  | KVM_REG_MIPS_CP0_ENTRYHI      | 64
 MIPS  | KVM_REG_MIPS_CP0_COMPARE      | 32
@@ -2089,6 +2106,7 @@ registers, find a list below:
 MIPS  | KVM_REG_MIPS_CP0_CONFIG4      | 32
 MIPS  | KVM_REG_MIPS_CP0_CONFIG5      | 32
 MIPS  | KVM_REG_MIPS_CP0_CONFIG7      | 32
+MIPS  | KVM_REG_MIPS_CP0_XCONTEXT     | 64
 MIPS  | KVM_REG_MIPS_CP0_ERROREPC     | 64
 MIPS  | KVM_REG_MIPS_CP0_KSCRATCH1    | 64
 MIPS  | KVM_REG_MIPS_CP0_KSCRATCH2    | 64
@@ -2096,6 +2114,7 @@ registers, find a list below:
 MIPS  | KVM_REG_MIPS_CP0_KSCRATCH4    | 64
 MIPS  | KVM_REG_MIPS_CP0_KSCRATCH5    | 64
 MIPS  | KVM_REG_MIPS_CP0_KSCRATCH6    | 64
+MIPS  | KVM_REG_MIPS_CP0_MAAR(0..63)  | 64
 MIPS  | KVM_REG_MIPS_COUNT_CTL        | 64
 MIPS  | KVM_REG_MIPS_COUNT_RESUME     | 64
 MIPS  | KVM_REG_MIPS_COUNT_HZ         | 64
@@ -2162,6 +2181,10 @@ hardware, host kernel, guest, and whether XPA is present in the guest, i.e.
 with the RI and XI bits (if they exist) in bits 63 and 62 respectively, and
 the PFNX field starting at bit 30.
 
+MIPS MAARs (see KVM_REG_MIPS_CP0_MAAR(*) above) have the following id bit
+patterns:
+  0x7030 0000 0001 01 <reg:8>
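As a worked example, the id for guest MAAR register n follows directly from
this pattern; a sketch of reading one MAAR with KVM_GET_ONE_REG on a vcpu fd
(error handling omitted, helper names are illustrative only):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* 0x7030 0000 0001 01 <reg:8> = MIPS | SIZE_U64 | CP0 set 1 | index */
    static uint64_t kvm_reg_mips_cp0_maar(unsigned int n)
    {
            return 0x7030000000010100ULL | n;
    }

    static uint64_t get_maar(int vcpu_fd, unsigned int n)
    {
            uint64_t val = 0;
            struct kvm_one_reg reg = {
                    .id   = kvm_reg_mips_cp0_maar(n),
                    .addr = (uintptr_t)&val,
            };

            ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
            return val;
    }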
 
 MIPS KVM control registers (see above) have the following id bit patterns:
   0x7030 0000 0002 <reg:16>
@@ -4210,3 +4233,68 @@ This capability, if KVM_CHECK_EXTENSION indicates that it is
 available, means that the kernel can support guests using the
 hashed page table MMU defined in Power ISA V3.00 (as implemented in
 the POWER9 processor), including in-memory segment tables.
+
+8.5 KVM_CAP_MIPS_VZ
+
+Architectures: mips
+
+This capability, if KVM_CHECK_EXTENSION on the main kvm handle indicates that
+it is available, means that full hardware assisted virtualization capabilities
+of the hardware are available for use through KVM. An appropriate
+KVM_VM_MIPS_* type must be passed to KVM_CREATE_VM to create a VM which
+utilises it.
+
+If KVM_CHECK_EXTENSION on a kvm VM handle indicates that this capability is
+available, it means that the VM is using full hardware assisted virtualization
+capabilities of the hardware. This is useful to check after creating a VM with
+KVM_VM_MIPS_DEFAULT.
+
+The value returned by KVM_CHECK_EXTENSION should be compared against known
+values (see below). All other values are reserved. This is to allow for the
+possibility of other hardware assisted virtualization implementations which
+may be incompatible with the MIPS VZ ASE.
+
+ 0: The trap & emulate implementation is in use to run guest code in user
+    mode. Guest virtual memory segments are rearranged to fit the guest in
+    the user mode address space.
+
+ 1: The MIPS VZ ASE is in use, providing full hardware assisted
+    virtualization, including standard guest virtual memory segments.
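For example, after creating a VM, userspace can confirm which implementation
it got (a sketch, where vm_fd was returned by KVM_CREATE_VM):

    switch (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MIPS_VZ)) {
    case 0:
            /* trap & emulate; guest segments rearranged into user mode */
            break;
    case 1:
            /* MIPS VZ ASE; standard guest virtual memory segments */
            break;
    default:
            /* reserved value: some other, possibly incompatible, scheme */
            break;
    }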
+
+8.6 KVM_CAP_MIPS_TE
+
+Architectures: mips
+
+This capability, if KVM_CHECK_EXTENSION on the main kvm handle indicates that
+it is available, means that the trap & emulate implementation is available to
+run guest code in user mode, even if KVM_CAP_MIPS_VZ indicates that hardware
+assisted virtualisation is also available. KVM_VM_MIPS_TE (0) must be passed
+to KVM_CREATE_VM to create a VM which utilises it.
+
+If KVM_CHECK_EXTENSION on a kvm VM handle indicates that this capability is
+available, it means that the VM is using trap & emulate.
+
+8.7 KVM_CAP_MIPS_64BIT
+
+Architectures: mips
+
+This capability indicates the supported architecture type of the guest, i.e.
+the supported register and address width.
+
+The values returned when this capability is checked by KVM_CHECK_EXTENSION on
+a kvm VM handle correspond roughly to the CP0_Config.AT register field, and
+should be checked specifically against known values (see below). All other
+values are reserved.
+
+ 0: MIPS32 or microMIPS32.
+    Both registers and addresses are 32-bits wide.
+    It will only be possible to run 32-bit guest code.
+
+ 1: MIPS64 or microMIPS64 with access only to 32-bit compatibility segments.
+    Registers are 64-bits wide, but addresses are 32-bits wide.
+    64-bit guest code may run but cannot access MIPS64 memory segments.
+    It will also be possible to run 32-bit guest code.
+
+ 2: MIPS64 or microMIPS64 with access to all address segments.
+    Both registers and addresses are 64-bits wide.
+    It will be possible to run 64-bit or 32-bit guest code.
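A corresponding userspace check might look like this (a sketch; vm_fd is a VM
file descriptor as above):

    int at = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MIPS_64BIT);

    switch (at) {
    case 0:         /* MIPS32: 32-bit guest code only */
    case 1:         /* MIPS64 with 32-bit compatibility segments only */
    case 2:         /* MIPS64 with access to all address segments */
            break;
    default:        /* reserved value: do not guess */
            return -1;
    }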
@@ -28,6 +28,11 @@ S390:
   property inside the device tree's /hypervisor node.
   For more information refer to Documentation/virtual/kvm/ppc-pv.txt
 
+MIPS:
+  KVM hypercalls use the HYPCALL instruction with code 0 and the hypercall
+  number in $2 (v0). Up to four arguments may be placed in $4-$7 (a0-a3) and
+  the return value is placed in $2 (v0).
+
 KVM Hypercalls Documentation
 ===========================
 
 The template for each hypercall is:
...
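A guest could issue such a hypercall roughly as follows (a sketch: it assumes
an assembler that accepts the VZ "hypcall" mnemonic, and note that the host
side of this merge currently returns -KVM_ENOSYS for every hypercall number):

    static inline long mips_kvm_hypercall1(long num, long arg0)
    {
            register long v0 asm("$2") = num;       /* hypercall number */
            register long a0 asm("$4") = arg0;      /* first argument */

            asm volatile("hypcall" : "+r"(v0) : "r"(a0) : "memory");
            return v0;                              /* return value */
    }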
@@ -1687,6 +1687,7 @@ config CPU_CAVIUM_OCTEON
 	select USB_EHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
 	select USB_OHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
 	select MIPS_L1_CACHE_SHIFT_7
+	select HAVE_KVM
 	help
 	  The Cavium Octeon processor is a highly integrated chip containing
 	  many ethernet hardware widgets for networking tasks. The processor
...
@@ -444,6 +444,10 @@
 # define cpu_has_msa 0
 #endif
 
+#ifndef cpu_has_ufr
+# define cpu_has_ufr (cpu_data[0].options & MIPS_CPU_UFR)
+#endif
+
 #ifndef cpu_has_fre
 # define cpu_has_fre (cpu_data[0].options & MIPS_CPU_FRE)
 #endif
@@ -528,6 +532,9 @@
 #ifndef cpu_guest_has_htw
 #define cpu_guest_has_htw (cpu_data[0].guest.options & MIPS_CPU_HTW)
 #endif
+#ifndef cpu_guest_has_mvh
+#define cpu_guest_has_mvh (cpu_data[0].guest.options & MIPS_CPU_MVH)
+#endif
 #ifndef cpu_guest_has_msa
 #define cpu_guest_has_msa (cpu_data[0].guest.ases & MIPS_ASE_MSA)
 #endif
@@ -543,6 +550,9 @@
 #ifndef cpu_guest_has_maar
 #define cpu_guest_has_maar (cpu_data[0].guest.options & MIPS_CPU_MAAR)
 #endif
+#ifndef cpu_guest_has_userlocal
+#define cpu_guest_has_userlocal (cpu_data[0].guest.options & MIPS_CPU_ULRI)
+#endif
 
 /*
  * Guest dynamic capabilities
...
@@ -33,6 +33,7 @@ struct guest_info {
 	unsigned long		ases_dyn;
 	unsigned long long	options;
 	unsigned long long	options_dyn;
+	int			tlbsize;
 	u8			conf;
 	u8			kscratch_mask;
 };
@@ -109,6 +110,7 @@ struct cpuinfo_mips {
 	struct guest_info	guest;
 	unsigned int		gtoffset_mask;
 	unsigned int		guestid_mask;
+	unsigned int		guestid_cache;
 } __attribute__((aligned(SMP_CACHE_BYTES)));
 
 extern struct cpuinfo_mips cpu_data[];
...
@@ -415,6 +415,7 @@ enum cpu_type_enum {
 #define MIPS_CPU_GUESTCTL2	MBIT_ULL(50)	/* CPU has VZ GuestCtl2 register */
 #define MIPS_CPU_GUESTID	MBIT_ULL(51)	/* CPU uses VZ ASE GuestID feature */
 #define MIPS_CPU_DRG		MBIT_ULL(52)	/* CPU has VZ Direct Root to Guest (DRG) */
+#define MIPS_CPU_UFR		MBIT_ULL(53)	/* CPU supports User mode FR switching */
 
 /*
  * CPU ASE encodings
...
@@ -36,7 +36,7 @@ unsigned platform_maar_init(unsigned num_pairs);
  * @upper:	The highest address that the MAAR pair will affect. Must be
  *		aligned to one byte before a 2^16 byte boundary.
  * @attrs:	The accessibility attributes to program, eg. MIPS_MAAR_S. The
- *		MIPS_MAAR_V attribute will automatically be set.
+ *		MIPS_MAAR_VL attribute will automatically be set.
  *
  * Program the pair of MAAR registers specified by idx to apply the attributes
  * specified by attrs to the range of addresses from lower to higher.
@@ -49,10 +49,10 @@ static inline void write_maar_pair(unsigned idx, phys_addr_t lower,
 	BUG_ON(((upper & 0xffff) != 0xffff)
 		|| ((upper & ~0xffffull) & ~(MIPS_MAAR_ADDR << 4)));
 
-	/* Automatically set MIPS_MAAR_V */
-	attrs |= MIPS_MAAR_V;
+	/* Automatically set MIPS_MAAR_VL */
+	attrs |= MIPS_MAAR_VL;
 
-	/* Write the upper address & attributes (only MIPS_MAAR_V matters) */
+	/* Write the upper address & attributes (only MIPS_MAAR_VL matters) */
 	write_c0_maari(idx << 1);
 	back_to_back_c0_hazard();
 	write_c0_maar(((upper >> 4) & MIPS_MAAR_ADDR) | attrs);
@@ -81,7 +81,7 @@ extern void maar_init(void);
  * @upper:	The highest address that the MAAR pair will affect. Must be
  *		aligned to one byte before a 2^16 byte boundary.
  * @attrs:	The accessibility attributes to program, eg. MIPS_MAAR_S. The
- *		MIPS_MAAR_V attribute will automatically be set.
+ *		MIPS_MAAR_VL attribute will automatically be set.
  *
  * Describes the configuration of a pair of Memory Accessibility Attribute
  * Registers - applying attributes from attrs to the range of physical
...
@@ -34,8 +34,10 @@
  */
 #ifdef __ASSEMBLY__
 #define _ULCAST_
+#define _U64CAST_
 #else
 #define _ULCAST_ (unsigned long)
+#define _U64CAST_ (u64)
 #endif
 
 /*
@@ -217,8 +219,10 @@
 /*
  * Wired register bits
  */
-#define MIPSR6_WIRED_LIMIT	(_ULCAST_(0xffff) << 16)
-#define MIPSR6_WIRED_WIRED	(_ULCAST_(0xffff) << 0)
+#define MIPSR6_WIRED_LIMIT_SHIFT	16
+#define MIPSR6_WIRED_LIMIT	(_ULCAST_(0xffff) << MIPSR6_WIRED_LIMIT_SHIFT)
+#define MIPSR6_WIRED_WIRED_SHIFT	0
+#define MIPSR6_WIRED_WIRED	(_ULCAST_(0xffff) << MIPSR6_WIRED_WIRED_SHIFT)
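With the new shift definitions, extracting the two R6 Wired fields becomes a
plain mask-and-shift, e.g. (a sketch using the existing read_c0_wired()
accessor):

    unsigned int wired = read_c0_wired();
    unsigned int limit = (wired & MIPSR6_WIRED_LIMIT) >> MIPSR6_WIRED_LIMIT_SHIFT;
    unsigned int count = (wired & MIPSR6_WIRED_WIRED) >> MIPSR6_WIRED_WIRED_SHIFT;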
 /*
  * Values used for computation of new tlb entries
@@ -645,6 +649,7 @@
 #define MIPS_CONF5_LLB		(_ULCAST_(1) << 4)
 #define MIPS_CONF5_MVH		(_ULCAST_(1) << 5)
+#define MIPS_CONF5_SBRI		(_ULCAST_(1) << 6)
 #define MIPS_CONF5_VP		(_ULCAST_(1) << 7)
 #define MIPS_CONF5_FRE		(_ULCAST_(1) << 8)
 #define MIPS_CONF5_UFE		(_ULCAST_(1) << 9)
 #define MIPS_CONF5_MSAEN	(_ULCAST_(1) << 27)
@@ -719,10 +724,14 @@
 #define XLR_PERFCTRL_ALLTHREADS	(_ULCAST_(1) << 13)
 
 /* MAAR bit definitions */
+#define MIPS_MAAR_VH		(_U64CAST_(1) << 63)
 #define MIPS_MAAR_ADDR		((BIT_ULL(BITS_PER_LONG - 12) - 1) << 12)
 #define MIPS_MAAR_ADDR_SHIFT	12
 #define MIPS_MAAR_S		(_ULCAST_(1) << 1)
-#define MIPS_MAAR_V		(_ULCAST_(1) << 0)
+#define MIPS_MAAR_VL		(_ULCAST_(1) << 0)
+
+/* MAARI bit definitions */
+#define MIPS_MAARI_INDEX	(_ULCAST_(0x3f) << 0)
 
 /* EBase bit definitions */
 #define MIPS_EBASE_CPUNUM_SHIFT	0
@@ -736,6 +745,10 @@
 #define MIPS_CMGCRB_BASE	11
 #define MIPS_CMGCRF_BASE	(~_ULCAST_((1 << MIPS_CMGCRB_BASE) - 1))
 
+/* LLAddr bit definitions */
+#define MIPS_LLADDR_LLB_SHIFT	0
+#define MIPS_LLADDR_LLB		(_ULCAST_(1) << MIPS_LLADDR_LLB_SHIFT)
+
 /*
  * Bits in the MIPS32 Memory Segmentation registers.
  */
@@ -961,6 +974,22 @@
 /* Flush FTLB */
 #define LOONGSON_DIAG_FTLB	(_ULCAST_(1) << 13)
 
+/* CvmCtl register field definitions */
+#define CVMCTL_IPPCI_SHIFT	7
+#define CVMCTL_IPPCI		(_U64CAST_(0x7) << CVMCTL_IPPCI_SHIFT)
+#define CVMCTL_IPTI_SHIFT	4
+#define CVMCTL_IPTI		(_U64CAST_(0x7) << CVMCTL_IPTI_SHIFT)
+
+/* CvmMemCtl2 register field definitions */
+#define CVMMEMCTL2_INHIBITTS	(_U64CAST_(1) << 17)
+
+/* CvmVMConfig register field definitions */
+#define CVMVMCONF_DGHT		(_U64CAST_(1) << 60)
+#define CVMVMCONF_MMUSIZEM1_S	12
+#define CVMVMCONF_MMUSIZEM1	(_U64CAST_(0xff) << CVMVMCONF_MMUSIZEM1_S)
+#define CVMVMCONF_RMMUSIZEM1_S	0
+#define CVMVMCONF_RMMUSIZEM1	(_U64CAST_(0xff) << CVMVMCONF_RMMUSIZEM1_S)
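The MMUSizeM1 fields hold the size minus one, so decoding the guest and root
TLB sizes from CvmVMConfig is roughly (a sketch for VZ-capable Octeon III
cores, using the read_c0_cvmvmconfig() accessor added below):

    u64 config = read_c0_cvmvmconfig();
    unsigned int guest_mmu_size =
            ((config & CVMVMCONF_MMUSIZEM1) >> CVMVMCONF_MMUSIZEM1_S) + 1;
    unsigned int root_mmu_size =
            ((config & CVMVMCONF_RMMUSIZEM1) >> CVMVMCONF_RMMUSIZEM1_S) + 1;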
 
 /*
  * Coprocessor 1 (FPU) register names
  */
@@ -1720,6 +1749,13 @@ do { \
 #define read_c0_cvmmemctl()	__read_64bit_c0_register($11, 7)
 #define write_c0_cvmmemctl(val)	__write_64bit_c0_register($11, 7, val)
 
+#define read_c0_cvmmemctl2()	__read_64bit_c0_register($16, 6)
+#define write_c0_cvmmemctl2(val) __write_64bit_c0_register($16, 6, val)
+
+#define read_c0_cvmvmconfig()	__read_64bit_c0_register($16, 7)
+#define write_c0_cvmvmconfig(val) __write_64bit_c0_register($16, 7, val)
+
 /*
  * The cacheerr registers are not standardized.  On OCTEON, they are
  * 64 bits wide.
@@ -1989,6 +2025,8 @@ do { \
 #define read_gc0_epc()		__read_ulong_gc0_register(14, 0)
 #define write_gc0_epc(val)	__write_ulong_gc0_register(14, 0, val)
 
+#define read_gc0_prid()		__read_32bit_gc0_register(15, 0)
+
 #define read_gc0_ebase()	__read_32bit_gc0_register(15, 1)
 #define write_gc0_ebase(val)	__write_32bit_gc0_register(15, 1, val)
@@ -2012,6 +2050,9 @@ do { \
 #define write_gc0_config6(val)	__write_32bit_gc0_register(16, 6, val)
 #define write_gc0_config7(val)	__write_32bit_gc0_register(16, 7, val)
 
+#define read_gc0_lladdr()	__read_ulong_gc0_register(17, 0)
+#define write_gc0_lladdr(val)	__write_ulong_gc0_register(17, 0, val)
+
 #define read_gc0_watchlo0()	__read_ulong_gc0_register(18, 0)
 #define read_gc0_watchlo1()	__read_ulong_gc0_register(18, 1)
 #define read_gc0_watchlo2()	__read_ulong_gc0_register(18, 2)
@@ -2090,6 +2131,19 @@ do { \
 #define write_gc0_kscratch5(val)	__write_ulong_gc0_register(31, 6, val)
 #define write_gc0_kscratch6(val)	__write_ulong_gc0_register(31, 7, val)
 
+/* Cavium OCTEON (cnMIPS) */
+#define read_gc0_cvmcount()		__read_ulong_gc0_register(9, 6)
+#define write_gc0_cvmcount(val)		__write_ulong_gc0_register(9, 6, val)
+
+#define read_gc0_cvmctl()		__read_64bit_gc0_register(9, 7)
+#define write_gc0_cvmctl(val)		__write_64bit_gc0_register(9, 7, val)
+
+#define read_gc0_cvmmemctl()		__read_64bit_gc0_register(11, 7)
+#define write_gc0_cvmmemctl(val)	__write_64bit_gc0_register(11, 7, val)
+
+#define read_gc0_cvmmemctl2()		__read_64bit_gc0_register(16, 6)
+#define write_gc0_cvmmemctl2(val)	__write_64bit_gc0_register(16, 6, val)
+
 /*
  * Macros to access the floating point coprocessor control registers
  */
@@ -2696,9 +2750,11 @@ __BUILD_SET_C0(brcm_mode)
  */
 #define __BUILD_SET_GC0(name)	__BUILD_SET_COMMON(gc0_##name)
 
+__BUILD_SET_GC0(wired)
 __BUILD_SET_GC0(status)
 __BUILD_SET_GC0(cause)
 __BUILD_SET_GC0(ebase)
+__BUILD_SET_GC0(config1)
 
 /*
  * Return low 10 bits of ebase.
...
@@ -21,9 +21,11 @@
  */
 #define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
 
-#define UNIQUE_ENTRYHI(idx)						\
-		((CKSEG0 + ((idx) << (PAGE_SHIFT + 1))) |		\
+#define _UNIQUE_ENTRYHI(base, idx)					\
+		(((base) + ((idx) << (PAGE_SHIFT + 1))) |		\
 		 (cpu_has_tlbinv ? MIPS_ENTRYHI_EHINV : 0))
+#define UNIQUE_ENTRYHI(idx) _UNIQUE_ENTRYHI(CKSEG0, idx)
+#define UNIQUE_GUEST_ENTRYHI(idx) _UNIQUE_ENTRYHI(CKSEG1, idx)
 
 static inline unsigned int num_wired_entries(void)
 {
...
@@ -179,7 +179,7 @@ enum cop0_coi_func {
 	tlbr_op       = 0x01, tlbwi_op      = 0x02,
 	tlbwr_op      = 0x06, tlbp_op       = 0x08,
 	rfe_op        = 0x10, eret_op       = 0x18,
-	wait_op       = 0x20,
+	wait_op       = 0x20, hypcall_op    = 0x28
 };
 
 /*
...
@@ -54,10 +54,15 @@ struct kvm_fpu {
  * Register set = 0: GP registers from kvm_regs (see definitions below).
  *
  * Register set = 1: CP0 registers.
- *  bits[15..8]  - Must be zero.
- *  bits[7..3]   - Register 'rd'  index.
- *  bits[2..0]   - Register 'sel' index.
+ *  bits[15..8]  - COP0 register set.
+ *
+ *  COP0 register set = 0: Main CP0 registers.
+ *   bits[7..3]   - Register 'rd'  index.
+ *   bits[2..0]   - Register 'sel' index.
+ *
+ *  COP0 register set = 1: MAARs.
+ *   bits[7..0]   - MAAR index.
 *
 * Register set = 2: KVM specific registers (see definitions below).
 *
 * Register set = 3: FPU / MSA registers (see definitions below).
@@ -114,6 +119,15 @@ struct kvm_fpu {
 #define KVM_REG_MIPS_PC		(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 34)
 
+/*
+ * KVM_REG_MIPS_CP0 - Coprocessor 0 registers.
+ */
+
+#define KVM_REG_MIPS_MAAR	(KVM_REG_MIPS_CP0 | (1 << 8))
+#define KVM_REG_MIPS_CP0_MAAR(n)	(KVM_REG_MIPS_MAAR | \
+					 KVM_REG_SIZE_U64 | (n))
+
 /*
  * KVM_REG_MIPS_KVM - KVM specific control registers.
  */
...
@@ -289,6 +289,8 @@ static void cpu_set_fpu_opts(struct cpuinfo_mips *c)
 			MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) {
 		if (c->fpu_id & MIPS_FPIR_3D)
 			c->ases |= MIPS_ASE_MIPS3D;
+		if (c->fpu_id & MIPS_FPIR_UFRP)
+			c->options |= MIPS_CPU_UFR;
 		if (c->fpu_id & MIPS_FPIR_FREP)
 			c->options |= MIPS_CPU_FRE;
 	}
@@ -1003,7 +1005,8 @@ static inline unsigned int decode_guest_config3(struct cpuinfo_mips *c)
 	unsigned int config3, config3_dyn;
 
 	probe_gc0_config_dyn(config3, config3, config3_dyn,
-			     MIPS_CONF_M | MIPS_CONF3_MSA | MIPS_CONF3_CTXTC);
+			     MIPS_CONF_M | MIPS_CONF3_MSA | MIPS_CONF3_ULRI |
+			     MIPS_CONF3_CTXTC);
 
 	if (config3 & MIPS_CONF3_CTXTC)
 		c->guest.options |= MIPS_CPU_CTXTC;
@@ -1013,6 +1016,9 @@ static inline unsigned int decode_guest_config3(struct cpuinfo_mips *c)
 	if (config3 & MIPS_CONF3_PW)
 		c->guest.options |= MIPS_CPU_HTW;
 
+	if (config3 & MIPS_CONF3_ULRI)
+		c->guest.options |= MIPS_CPU_ULRI;
+
 	if (config3 & MIPS_CONF3_SC)
 		c->guest.options |= MIPS_CPU_SEGMENTS;
@@ -1051,7 +1057,7 @@ static inline unsigned int decode_guest_config5(struct cpuinfo_mips *c)
 	unsigned int config5, config5_dyn;
 
 	probe_gc0_config_dyn(config5, config5, config5_dyn,
-			     MIPS_CONF_M | MIPS_CONF5_MRP);
+			     MIPS_CONF_M | MIPS_CONF5_MVH | MIPS_CONF5_MRP);
 
 	if (config5 & MIPS_CONF5_MRP)
 		c->guest.options |= MIPS_CPU_MAAR;
@@ -1061,6 +1067,9 @@ static inline unsigned int decode_guest_config5(struct cpuinfo_mips *c)
 	if (config5 & MIPS_CONF5_LLB)
 		c->guest.options |= MIPS_CPU_RW_LLB;
 
+	if (config5 & MIPS_CONF5_MVH)
+		c->guest.options |= MIPS_CPU_MVH;
+
 	if (config5 & MIPS_CONF_M)
 		c->guest.conf |= BIT(6);
 
 	return config5 & MIPS_CONF_M;
...
@@ -70,6 +70,7 @@ EXPORT_SYMBOL(perf_irq);
  */
 
 unsigned int mips_hpt_frequency;
+EXPORT_SYMBOL_GPL(mips_hpt_frequency);
 
 /*
  * This function exists in order to cause an error due to a duplicate
...
@@ -26,11 +26,34 @@ config KVM
 	select SRCU
 	---help---
 	  Support for hosting Guest kernels.
-	  Currently supported on MIPS32 processors.
+
+choice
+	prompt "Virtualization mode"
+	depends on KVM
+	default KVM_MIPS_TE
+
+config KVM_MIPS_TE
+	bool "Trap & Emulate"
+	---help---
+	  Use trap and emulate to virtualize 32-bit guests in user mode. This
+	  does not require any special hardware Virtualization support beyond
+	  standard MIPS32/64 r2 or later, but it does require the guest kernel
+	  to be configured with CONFIG_KVM_GUEST=y so that it resides in the
+	  user address segment.
+
+config KVM_MIPS_VZ
+	bool "MIPS Virtualization (VZ) ASE"
+	---help---
+	  Use the MIPS Virtualization (VZ) ASE to virtualize guests. This
+	  supports running unmodified guest kernels (with CONFIG_KVM_GUEST=n),
+	  but requires hardware support.
+
+endchoice
+
 config KVM_MIPS_DYN_TRANS
 	bool "KVM/MIPS: Dynamic binary translation to reduce traps"
-	depends on KVM
+	depends on KVM_MIPS_TE
+	default y
 	---help---
 	  When running in Trap & Emulate mode patch privileged
 	  instructions to reduce the number of traps.
...
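A host kernel configured for the new VZ backend would therefore end up with a
.config fragment along these lines (a sketch, assuming a VZ-capable platform
that selects HAVE_KVM):

    CONFIG_VIRTUALIZATION=y
    CONFIG_KVM=y
    CONFIG_KVM_MIPS_VZ=y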
@@ -9,8 +9,15 @@ common-objs-$(CONFIG_CPU_HAS_MSA) += msa.o
 
 kvm-objs := $(common-objs-y) mips.o emulate.o entry.o \
 	    interrupt.o stats.o commpage.o \
-	    dyntrans.o trap_emul.o fpu.o
+	    fpu.o
+kvm-objs += hypcall.o
 kvm-objs += mmu.o
+ifdef CONFIG_KVM_MIPS_VZ
+kvm-objs += vz.o
+else
+kvm-objs += dyntrans.o
+kvm-objs += trap_emul.o
+endif
 
 obj-$(CONFIG_KVM) += kvm.o
 obj-y += callback.o tlb.o
...
@@ -51,12 +51,15 @@
 #define RA		31
 
 /* Some CP0 registers */
+#define C0_PWBASE	5, 5
 #define C0_HWRENA	7, 0
 #define C0_BADVADDR	8, 0
 #define C0_BADINSTR	8, 1
 #define C0_BADINSTRP	8, 2
 #define C0_ENTRYHI	10, 0
+#define C0_GUESTCTL1	10, 4
 #define C0_STATUS	12, 0
+#define C0_GUESTCTL0	12, 6
 #define C0_CAUSE	13, 0
 #define C0_EPC		14, 0
 #define C0_EBASE	15, 1
@@ -292,8 +295,8 @@ static void *kvm_mips_build_enter_guest(void *addr)
 	unsigned int i;
 	struct uasm_label labels[2];
 	struct uasm_reloc relocs[2];
-	struct uasm_label *l = labels;
-	struct uasm_reloc *r = relocs;
+	struct uasm_label __maybe_unused *l = labels;
+	struct uasm_reloc __maybe_unused *r = relocs;
 
 	memset(labels, 0, sizeof(labels));
 	memset(relocs, 0, sizeof(relocs));
@@ -302,7 +305,67 @@ static void *kvm_mips_build_enter_guest(void *addr)
 	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, pc), K1);
 	UASM_i_MTC0(&p, T0, C0_EPC);
 
-	/* Set the ASID for the Guest Kernel */
+#ifdef CONFIG_KVM_MIPS_VZ
+	/* Save normal linux process pgd (VZ guarantees pgd_reg is set) */
+	UASM_i_MFC0(&p, K0, c0_kscratch(), pgd_reg);
+	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_pgd), K1);
+
+	/*
+	 * Set up KVM GPA pgd.
+	 * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD():
+	 * - call tlbmiss_handler_setup_pgd(mm->pgd)
+	 * - write mm->pgd into CP0_PWBase
+	 *
+	 * We keep S0 pointing at struct kvm so we can load the ASID below.
+	 */
+	UASM_i_LW(&p, S0, (int)offsetof(struct kvm_vcpu, kvm) -
+			  (int)offsetof(struct kvm_vcpu, arch), K1);
+	UASM_i_LW(&p, A0, offsetof(struct kvm, arch.gpa_mm.pgd), S0);
+	UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
+	uasm_i_jalr(&p, RA, T9);
+	/* delay slot */
+	if (cpu_has_htw)
+		UASM_i_MTC0(&p, A0, C0_PWBASE);
+	else
+		uasm_i_nop(&p);
+
+	/* Set GM bit to setup eret to VZ guest context */
+	uasm_i_addiu(&p, V1, ZERO, 1);
+	uasm_i_mfc0(&p, K0, C0_GUESTCTL0);
+	uasm_i_ins(&p, K0, V1, MIPS_GCTL0_GM_SHIFT, 1);
+	uasm_i_mtc0(&p, K0, C0_GUESTCTL0);
+
+	if (cpu_has_guestid) {
+		/*
+		 * Set root mode GuestID, so that root TLB refill handler can
+		 * use the correct GuestID in the root TLB.
+		 */
+
+		/* Get current GuestID */
+		uasm_i_mfc0(&p, T0, C0_GUESTCTL1);
+		/* Set GuestCtl1.RID = GuestCtl1.ID */
+		uasm_i_ext(&p, T1, T0, MIPS_GCTL1_ID_SHIFT,
+			   MIPS_GCTL1_ID_WIDTH);
+		uasm_i_ins(&p, T0, T1, MIPS_GCTL1_RID_SHIFT,
+			   MIPS_GCTL1_RID_WIDTH);
+		uasm_i_mtc0(&p, T0, C0_GUESTCTL1);
+
+		/* GuestID handles dealiasing so we don't need to touch ASID */
+		goto skip_asid_restore;
+	}
+
+	/* Root ASID Dealias (RAD) */
+
+	/* Save host ASID */
+	UASM_i_MFC0(&p, K0, C0_ENTRYHI);
+	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_entryhi),
+		  K1);
+
+	/* Set the root ASID for the Guest */
+	UASM_i_ADDIU(&p, T1, S0,
+		     offsetof(struct kvm, arch.gpa_mm.context.asid));
+#else
+	/* Set the ASID for the Guest Kernel or User */
 	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, cop0), K1);
 	UASM_i_LW(&p, T0, offsetof(struct mips_coproc, reg[MIPS_CP0_STATUS][0]),
 		  T0);
@@ -315,6 +378,7 @@ static void *kvm_mips_build_enter_guest(void *addr)
 		UASM_i_ADDIU(&p, T1, K1, offsetof(struct kvm_vcpu_arch,
 						  guest_user_mm.context.asid));
 	uasm_l_kernel_asid(&l, p);
+#endif
 
 	/* t1: contains the base of the ASID array, need to get the cpu id */
 	/* smp_processor_id */
@@ -339,6 +403,7 @@ static void *kvm_mips_build_enter_guest(void *addr)
 	uasm_i_andi(&p, K0, K0, MIPS_ENTRYHI_ASID);
 #endif
 
+#ifndef CONFIG_KVM_MIPS_VZ
 	/*
 	 * Set up KVM T&E GVA pgd.
	 * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD():
@@ -351,7 +416,11 @@ static void *kvm_mips_build_enter_guest(void *addr)
 	UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
 	uasm_i_jalr(&p, RA, T9);
 	uasm_i_mtc0(&p, K0, C0_ENTRYHI);
+#else
+	/* Set up KVM VZ root ASID (!guestid) */
+	uasm_i_mtc0(&p, K0, C0_ENTRYHI);
+skip_asid_restore:
+#endif
 	uasm_i_ehb(&p);
 
 	/* Disable RDHWR access */
@@ -559,13 +628,10 @@ void *kvm_mips_build_exit(void *addr)
 	/* Now that context has been saved, we can use other registers */
 
 	/* Restore vcpu */
-	UASM_i_MFC0(&p, A1, scratch_vcpu[0], scratch_vcpu[1]);
-	uasm_i_move(&p, S1, A1);
+	UASM_i_MFC0(&p, S1, scratch_vcpu[0], scratch_vcpu[1]);
 
 	/* Restore run (vcpu->run) */
-	UASM_i_LW(&p, A0, offsetof(struct kvm_vcpu, run), A1);
-	/* Save pointer to run in s0, will be saved by the compiler */
-	uasm_i_move(&p, S0, A0);
+	UASM_i_LW(&p, S0, offsetof(struct kvm_vcpu, run), S1);
 
 	/*
	 * Save Host level EPC, BadVaddr and Cause to VCPU, useful to process
@@ -641,6 +707,52 @@ void *kvm_mips_build_exit(void *addr)
 		uasm_l_msa_1(&l, p);
 	}
 
+#ifdef CONFIG_KVM_MIPS_VZ
+	/* Restore host ASID */
+	if (!cpu_has_guestid) {
+		UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, host_entryhi),
+			  K1);
+		UASM_i_MTC0(&p, K0, C0_ENTRYHI);
+	}
+
+	/*
+	 * Set up normal Linux process pgd.
+	 * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD():
+	 * - call tlbmiss_handler_setup_pgd(mm->pgd)
+	 * - write mm->pgd into CP0_PWBase
+	 */
+	UASM_i_LW(&p, A0,
+		  offsetof(struct kvm_vcpu_arch, host_pgd), K1);
+	UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
+	uasm_i_jalr(&p, RA, T9);
+	/* delay slot */
+	if (cpu_has_htw)
+		UASM_i_MTC0(&p, A0, C0_PWBASE);
+	else
+		uasm_i_nop(&p);
+
+	/* Clear GM bit so we don't enter guest mode when EXL is cleared */
+	uasm_i_mfc0(&p, K0, C0_GUESTCTL0);
+	uasm_i_ins(&p, K0, ZERO, MIPS_GCTL0_GM_SHIFT, 1);
+	uasm_i_mtc0(&p, K0, C0_GUESTCTL0);
+
+	/* Save GuestCtl0 so we can access GExcCode after CPU migration */
+	uasm_i_sw(&p, K0,
+		  offsetof(struct kvm_vcpu_arch, host_cp0_guestctl0), K1);
+
+	if (cpu_has_guestid) {
+		/*
+		 * Clear root mode GuestID, so that root TLB operations use the
+		 * root GuestID in the root TLB.
+		 */
+		uasm_i_mfc0(&p, T0, C0_GUESTCTL1);
+		/* Set GuestCtl1.RID = MIPS_GCTL1_ROOT_GUESTID (i.e. 0) */
+		uasm_i_ins(&p, T0, ZERO, MIPS_GCTL1_RID_SHIFT,
+			   MIPS_GCTL1_RID_WIDTH);
+		uasm_i_mtc0(&p, T0, C0_GUESTCTL1);
+	}
+#endif
+
 	/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
 	uasm_i_addiu(&p, AT, ZERO, ~(ST0_EXL | KSU_USER | ST0_IE));
 	uasm_i_and(&p, V0, V0, AT);
@@ -680,6 +792,8 @@ void *kvm_mips_build_exit(void *addr)
 	 * Now jump to the kvm_mips_handle_exit() to see if we can deal
 	 * with this in the kernel
 	 */
+	uasm_i_move(&p, A0, S0);
+	uasm_i_move(&p, A1, S1);
 	UASM_i_LA(&p, T9, (unsigned long)kvm_mips_handle_exit);
 	uasm_i_jalr(&p, RA, T9);
 	UASM_i_ADDIU(&p, SP, SP, -CALLFRAME_SIZ);
...
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* KVM/MIPS: Hypercall handling.
*
* Copyright (C) 2015 Imagination Technologies Ltd.
*/
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/kvm_para.h>
#define MAX_HYPCALL_ARGS 4
enum emulation_result kvm_mips_emul_hypcall(struct kvm_vcpu *vcpu,
union mips_instruction inst)
{
unsigned int code = (inst.co_format.code >> 5) & 0x3ff;
kvm_debug("[%#lx] HYPCALL %#03x\n", vcpu->arch.pc, code);
switch (code) {
case 0:
return EMULATE_HYPERCALL;
default:
return EMULATE_FAIL;
};
}
static int kvm_mips_hypercall(struct kvm_vcpu *vcpu, unsigned long num,
const unsigned long *args, unsigned long *hret)
{
/* Report unimplemented hypercall to guest */
*hret = -KVM_ENOSYS;
return RESUME_GUEST;
}
int kvm_mips_handle_hypcall(struct kvm_vcpu *vcpu)
{
unsigned long num, args[MAX_HYPCALL_ARGS];
/* read hypcall number and arguments */
num = vcpu->arch.gprs[2]; /* v0 */
args[0] = vcpu->arch.gprs[4]; /* a0 */
args[1] = vcpu->arch.gprs[5]; /* a1 */
args[2] = vcpu->arch.gprs[6]; /* a2 */
args[3] = vcpu->arch.gprs[7]; /* a3 */
return kvm_mips_hypercall(vcpu, num,
args, &vcpu->arch.gprs[2] /* v0 */);
}
@@ -30,8 +30,13 @@
 
 #define C_TI        (_ULCAST_(1) << 30)
 
+#ifdef CONFIG_KVM_MIPS_VZ
+#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (1)
+#define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE (1)
+#else
 #define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0)
 #define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE (0)
+#endif
 
 void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority);
 void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority);
...
@@ -59,6 +59,16 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "fpe",	  VCPU_STAT(fpe_exits),		 KVM_STAT_VCPU },
 	{ "msa_disabled", VCPU_STAT(msa_disabled_exits), KVM_STAT_VCPU },
 	{ "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
+#ifdef CONFIG_KVM_MIPS_VZ
+	{ "vz_gpsi",	  VCPU_STAT(vz_gpsi_exits),	 KVM_STAT_VCPU },
+	{ "vz_gsfc",	  VCPU_STAT(vz_gsfc_exits),	 KVM_STAT_VCPU },
+	{ "vz_hc",	  VCPU_STAT(vz_hc_exits),	 KVM_STAT_VCPU },
+	{ "vz_grr",	  VCPU_STAT(vz_grr_exits),	 KVM_STAT_VCPU },
+	{ "vz_gva",	  VCPU_STAT(vz_gva_exits),	 KVM_STAT_VCPU },
+	{ "vz_ghfc",	  VCPU_STAT(vz_ghfc_exits),	 KVM_STAT_VCPU },
+	{ "vz_gpa",	  VCPU_STAT(vz_gpa_exits),	 KVM_STAT_VCPU },
+	{ "vz_resvd",	  VCPU_STAT(vz_resvd_exits),	 KVM_STAT_VCPU },
+#endif
 	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU },
 	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), KVM_STAT_VCPU },
 	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid), KVM_STAT_VCPU },
@@ -66,6 +76,19 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{NULL}
 };
 
+bool kvm_trace_guest_mode_change;
+
+int kvm_guest_mode_change_trace_reg(void)
+{
+	kvm_trace_guest_mode_change = 1;
+	return 0;
+}
+
+void kvm_guest_mode_change_trace_unreg(void)
+{
+	kvm_trace_guest_mode_change = 0;
+}
+
 /*
  * XXXKYMA: We are simulating a processor that has the WII bit set in
  * Config7, so we are "runnable" if interrupts are pending
@@ -82,7 +105,12 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 
 int kvm_arch_hardware_enable(void)
 {
-	return 0;
+	return kvm_mips_callbacks->hardware_enable();
+}
+
+void kvm_arch_hardware_disable(void)
+{
+	kvm_mips_callbacks->hardware_disable();
 }
 
 int kvm_arch_hardware_setup(void)
@@ -97,6 +125,18 @@ void kvm_arch_check_processor_compat(void *rtn)
 
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
+	switch (type) {
+#ifdef CONFIG_KVM_MIPS_VZ
+	case KVM_VM_MIPS_VZ:
+#else
+	case KVM_VM_MIPS_TE:
+#endif
+		break;
+	default:
+		/* Unsupported KVM type */
+		return -EINVAL;
+	};
+
 	/* Allocate page table to map GPA -> RPA */
 	kvm->arch.gpa_mm.pgd = kvm_pgd_alloc();
 	if (!kvm->arch.gpa_mm.pgd)
@@ -301,8 +341,10 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 	/* Build guest exception vectors dynamically in unmapped memory */
 	handler = gebase + 0x2000;
 
-	/* TLB refill */
+	/* TLB refill (or XTLB refill on 64-bit VZ where KX=1) */
 	refill_start = gebase;
+	if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && IS_ENABLED(CONFIG_64BIT))
+		refill_start += 0x080;
 	refill_end = kvm_mips_build_tlb_refill_exception(refill_start, handler);
 
 	/* General Exception Entry point */
@@ -353,9 +395,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 	/* Init */
 	vcpu->arch.last_sched_cpu = -1;
-
-	/* Start off the timer */
-	kvm_mips_init_count(vcpu);
+	vcpu->arch.last_exec_cpu = -1;
 
 	return vcpu;
@@ -1059,7 +1099,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 		r = cpu_has_msa && !(boot_cpu_data.msa_id & MSA_IR_WRPF);
 		break;
 	default:
-		r = 0;
+		r = kvm_mips_callbacks->check_extension(kvm, ext);
 		break;
 	}
 	return r;
@@ -1067,7 +1107,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 {
-	return kvm_mips_pending_timer(vcpu);
+	return kvm_mips_pending_timer(vcpu) ||
+		kvm_read_c0_guest_cause(vcpu->arch.cop0) & C_TI;
 }
 
 int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
@@ -1092,7 +1133,7 @@ int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
 	kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);
 
 	cop0 = vcpu->arch.cop0;
-	kvm_debug("\tStatus: 0x%08lx, Cause: 0x%08lx\n",
+	kvm_debug("\tStatus: 0x%08x, Cause: 0x%08x\n",
 		  kvm_read_c0_guest_status(cop0),
 		  kvm_read_c0_guest_cause(cop0));
@@ -1208,6 +1249,7 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	vcpu->mode = OUTSIDE_GUEST_MODE;
 
 	/* re-enable HTW before enabling interrupts */
-	htw_start();
+	if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ))
+		htw_start();
 
 	/* Set a default exit reason */
@@ -1226,9 +1268,11 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			cause, opc, run, vcpu);
 	trace_kvm_exit(vcpu, exccode);
 
-	/*
-	 * Do a privilege check, if in UM most of these exit conditions end up
-	 * causing an exception to be delivered to the Guest Kernel
-	 */
-	er = kvm_mips_check_privilege(cause, opc, run, vcpu);
-	if (er == EMULATE_PRIV_FAIL) {
+	if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
+		/*
+		 * Do a privilege check, if in UM most of these exit conditions
+		 * end up causing an exception to be delivered to the Guest
+		 * Kernel
+		 */
+		er = kvm_mips_check_privilege(cause, opc, run, vcpu);
+		if (er == EMULATE_PRIV_FAIL) {
@@ -1238,6 +1282,7 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			ret = RESUME_HOST;
 			goto skip_emul;
 		}
+	}
 
 	switch (exccode) {
 	case EXCCODE_INT:
@@ -1267,7 +1312,7 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		break;
 
 	case EXCCODE_TLBS:
-		kvm_debug("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
+		kvm_debug("TLB ST fault: cause %#x, status %#x, PC: %p, BadVaddr: %#lx\n",
 			  cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
 			  badvaddr);
@@ -1328,12 +1373,17 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
 		break;
 
+	case EXCCODE_GE:
+		/* defer exit accounting to handler */
+		ret = kvm_mips_callbacks->handle_guest_exit(vcpu);
+		break;
+
 	default:
 		if (cause & CAUSEF_BD)
 			opc += 1;
 		inst = 0;
 		kvm_get_badinstr(opc, vcpu, &inst);
-		kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
+		kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
 			exccode, opc, inst, badvaddr,
 			kvm_read_c0_guest_status(vcpu->arch.cop0));
 		kvm_arch_vcpu_dump_regs(vcpu);
@@ -1346,6 +1396,9 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 skip_emul:
 	local_irq_disable();
 
+	if (ret == RESUME_GUEST)
+		kvm_vz_acquire_htimer(vcpu);
+
 	if (er == EMULATE_DONE && !(ret & RESUME_HOST))
 		kvm_mips_deliver_interrupts(vcpu, cause);
@@ -1391,6 +1444,7 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	}
 
 	/* Disable HTW before returning to guest or host */
-	htw_stop();
+	if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ))
+		htw_stop();
 
 	return ret;
@@ -1527,16 +1581,18 @@ void kvm_drop_fpu(struct kvm_vcpu *vcpu)
 void kvm_lose_fpu(struct kvm_vcpu *vcpu)
 {
 	/*
-	 * FPU & MSA get disabled in root context (hardware) when it is disabled
-	 * in guest context (software), but the register state in the hardware
-	 * may still be in use. This is why we explicitly re-enable the hardware
-	 * before saving.
+	 * With T&E, FPU & MSA get disabled in root context (hardware) when it
+	 * is disabled in guest context (software), but the register state in
+	 * the hardware may still be in use.
+	 * This is why we explicitly re-enable the hardware before saving.
 	 */
 	preempt_disable();
 	if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
+		if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
 			set_c0_config5(MIPS_CONF5_MSAEN);
 			enable_fpu_hazard();
+		}
 
 		__kvm_save_msa(&vcpu->arch);
 		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA);
@@ -1549,8 +1605,10 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu)
 	}
 		vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA);
 	} else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
+		if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
 			set_c0_status(ST0_CU1);
 			enable_fpu_hazard();
+		}
 
 		__kvm_save_fpu(&vcpu->arch);
 		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
...
@@ -992,6 +992,22 @@ static pte_t kvm_mips_gpa_pte_to_gva_mapped(pte_t pte, long entrylo)
 	return kvm_mips_gpa_pte_to_gva_unmapped(pte);
 }
 
+#ifdef CONFIG_KVM_MIPS_VZ
+int kvm_mips_handle_vz_root_tlb_fault(unsigned long badvaddr,
+				      struct kvm_vcpu *vcpu,
+				      bool write_fault)
+{
+	int ret;
+
+	ret = kvm_mips_map_page(vcpu, badvaddr, write_fault, NULL, NULL);
+	if (ret)
+		return ret;
+
+	/* Invalidate this entry in the TLB */
+	return kvm_vz_host_tlb_inv(vcpu, badvaddr);
+}
+#endif
+
 /* XXXKYMA: Must be called with interrupts disabled */
 int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
 				    struct kvm_vcpu *vcpu,
@@ -1225,6 +1241,10 @@ int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
 {
 	int err;
 
+	if (WARN(IS_ENABLED(CONFIG_KVM_MIPS_VZ),
+		 "Expect BadInstr/BadInstrP registers to be used with VZ\n"))
+		return -EINVAL;
+
 retry:
 	kvm_trap_emul_gva_lockless_begin(vcpu);
 	err = get_user(*out, opc);
...
...@@ -17,6 +17,13 @@ ...@@ -17,6 +17,13 @@
#define TRACE_INCLUDE_PATH . #define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace #define TRACE_INCLUDE_FILE trace
/*
* arch/mips/kvm/mips.c
*/
extern bool kvm_trace_guest_mode_change;
int kvm_guest_mode_change_trace_reg(void);
void kvm_guest_mode_change_trace_unreg(void);
/* /*
* Tracepoints for VM enters * Tracepoints for VM enters
*/ */
...@@ -62,10 +69,20 @@ DEFINE_EVENT(kvm_transition, kvm_out, ...@@ -62,10 +69,20 @@ DEFINE_EVENT(kvm_transition, kvm_out,
#define KVM_TRACE_EXIT_MSA_FPE 14 #define KVM_TRACE_EXIT_MSA_FPE 14
#define KVM_TRACE_EXIT_FPE 15 #define KVM_TRACE_EXIT_FPE 15
#define KVM_TRACE_EXIT_MSA_DISABLED 21 #define KVM_TRACE_EXIT_MSA_DISABLED 21
#define KVM_TRACE_EXIT_GUEST_EXIT 27
/* Further exit reasons */ /* Further exit reasons */
#define KVM_TRACE_EXIT_WAIT 32 #define KVM_TRACE_EXIT_WAIT 32
#define KVM_TRACE_EXIT_CACHE 33 #define KVM_TRACE_EXIT_CACHE 33
#define KVM_TRACE_EXIT_SIGNAL 34 #define KVM_TRACE_EXIT_SIGNAL 34
/* 32 exit reasons correspond to GuestCtl0.GExcCode (VZ) */
#define KVM_TRACE_EXIT_GEXCCODE_BASE 64
#define KVM_TRACE_EXIT_GPSI 64 /* 0 */
#define KVM_TRACE_EXIT_GSFC 65 /* 1 */
#define KVM_TRACE_EXIT_HC 66 /* 2 */
#define KVM_TRACE_EXIT_GRR 67 /* 3 */
#define KVM_TRACE_EXIT_GVA 72 /* 8 */
#define KVM_TRACE_EXIT_GHFC 73 /* 9 */
#define KVM_TRACE_EXIT_GPA 74 /* 10 */
/* Tracepoints for VM exits */ /* Tracepoints for VM exits */
#define kvm_trace_symbol_exit_types \ #define kvm_trace_symbol_exit_types \
...@@ -83,9 +100,17 @@ DEFINE_EVENT(kvm_transition, kvm_out, ...@@ -83,9 +100,17 @@ DEFINE_EVENT(kvm_transition, kvm_out,
{ KVM_TRACE_EXIT_MSA_FPE, "MSA FPE" }, \ { KVM_TRACE_EXIT_MSA_FPE, "MSA FPE" }, \
{ KVM_TRACE_EXIT_FPE, "FPE" }, \ { KVM_TRACE_EXIT_FPE, "FPE" }, \
{ KVM_TRACE_EXIT_MSA_DISABLED, "MSA Disabled" }, \ { KVM_TRACE_EXIT_MSA_DISABLED, "MSA Disabled" }, \
{ KVM_TRACE_EXIT_GUEST_EXIT, "Guest Exit" }, \
{ KVM_TRACE_EXIT_WAIT, "WAIT" }, \ { KVM_TRACE_EXIT_WAIT, "WAIT" }, \
{ KVM_TRACE_EXIT_CACHE, "CACHE" }, \ { KVM_TRACE_EXIT_CACHE, "CACHE" }, \
{ KVM_TRACE_EXIT_SIGNAL, "Signal" } { KVM_TRACE_EXIT_SIGNAL, "Signal" }, \
{ KVM_TRACE_EXIT_GPSI, "GPSI" }, \
{ KVM_TRACE_EXIT_GSFC, "GSFC" }, \
{ KVM_TRACE_EXIT_HC, "HC" }, \
{ KVM_TRACE_EXIT_GRR, "GRR" }, \
{ KVM_TRACE_EXIT_GVA, "GVA" }, \
{ KVM_TRACE_EXIT_GHFC, "GHFC" }, \
{ KVM_TRACE_EXIT_GPA, "GPA" }
 TRACE_EVENT(kvm_exit,
 	    TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
@@ -158,6 +183,8 @@ TRACE_EVENT(kvm_exit,
 	{ KVM_TRACE_COP0(16, 4),	"Config4" },	\
 	{ KVM_TRACE_COP0(16, 5),	"Config5" },	\
 	{ KVM_TRACE_COP0(16, 7),	"Config7" },	\
+	{ KVM_TRACE_COP0(17, 1),	"MAAR" },	\
+	{ KVM_TRACE_COP0(17, 2),	"MAARI" },	\
 	{ KVM_TRACE_COP0(26, 0),	"ECC" },	\
 	{ KVM_TRACE_COP0(30, 0),	"ErrorEPC" },	\
 	{ KVM_TRACE_COP0(31, 2),	"KScratch1" },	\
@@ -268,6 +295,51 @@ TRACE_EVENT(kvm_asid_change,
 		      __entry->new_asid)
 );
 
+TRACE_EVENT(kvm_guestid_change,
+	    TP_PROTO(struct kvm_vcpu *vcpu, unsigned int guestid),
+	    TP_ARGS(vcpu, guestid),
+	    TP_STRUCT__entry(
+			__field(unsigned int, guestid)
+	    ),
+
+	    TP_fast_assign(
+			__entry->guestid = guestid;
+	    ),
+
+	    TP_printk("GuestID: 0x%02x",
+		      __entry->guestid)
+);
+
+TRACE_EVENT_FN(kvm_guest_mode_change,
+	    TP_PROTO(struct kvm_vcpu *vcpu),
+	    TP_ARGS(vcpu),
+	    TP_STRUCT__entry(
+			__field(unsigned long, epc)
+			__field(unsigned long, pc)
+			__field(unsigned long, badvaddr)
+			__field(unsigned int, status)
+			__field(unsigned int, cause)
+	    ),
+
+	    TP_fast_assign(
+			__entry->epc = kvm_read_c0_guest_epc(vcpu->arch.cop0);
+			__entry->pc = vcpu->arch.pc;
+			__entry->badvaddr = kvm_read_c0_guest_badvaddr(vcpu->arch.cop0);
+			__entry->status = kvm_read_c0_guest_status(vcpu->arch.cop0);
+			__entry->cause = kvm_read_c0_guest_cause(vcpu->arch.cop0);
+	    ),
+
+	    TP_printk("EPC: 0x%08lx PC: 0x%08lx Status: 0x%08x Cause: 0x%08x BadVAddr: 0x%08lx",
+		      __entry->epc,
+		      __entry->pc,
+		      __entry->status,
+		      __entry->cause,
+		      __entry->badvaddr),
+	    kvm_guest_mode_change_trace_reg,
+	    kvm_guest_mode_change_trace_unreg
+);
+
 #endif /* _TRACE_KVM_H */
 
 /* This part must be outside protection */
...
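TRACE_EVENT_FN differs from TRACE_EVENT in taking register/unregister
callbacks that run when the tracepoint is switched on and off. The extern
declarations near the top of the file imply definitions in
arch/mips/kvm/mips.c roughly like the sketch below (that diff is collapsed
above, so this is an assumption, not a quote):

    bool kvm_trace_guest_mode_change;

    int kvm_guest_mode_change_trace_reg(void)
    {
    	kvm_trace_guest_mode_change = true;
    	return 0;
    }

    void kvm_guest_mode_change_trace_unreg(void)
    {
    	kvm_trace_guest_mode_change = false;
    }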
@@ -12,6 +12,7 @@
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/kvm_host.h>
+#include <linux/log2.h>
 #include <linux/uaccess.h>
 #include <linux/vmalloc.h>
 #include <asm/mmu_context.h>
@@ -40,6 +41,29 @@ static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
 	return gpa;
 }
 
+static int kvm_trap_emul_no_handler(struct kvm_vcpu *vcpu)
+{
+	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
+	u32 cause = vcpu->arch.host_cp0_cause;
+	u32 exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
+	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+	u32 inst = 0;
+
+	/*
+	 *  Fetch the instruction.
+	 */
+	if (cause & CAUSEF_BD)
+		opc += 1;
+	kvm_get_badinstr(opc, vcpu, &inst);
+
+	kvm_err("Exception Code: %d not handled @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
+		exccode, opc, inst, badvaddr,
+		kvm_read_c0_guest_status(vcpu->arch.cop0));
+	kvm_arch_vcpu_dump_regs(vcpu);
+	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+	return RESUME_HOST;
+}
+
 static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -82,6 +106,10 @@ static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
 		ret = RESUME_HOST;
 		break;
 
+	case EMULATE_HYPERCALL:
+		ret = kvm_mips_handle_hypcall(vcpu);
+		break;
+
 	default:
 		BUG();
 	}
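EMULATE_HYPERCALL is returned by the instruction emulator when it decodes a
HYPCALL, and kvm_mips_handle_hypcall() (in the collapsed diffs) reads the
call number and arguments out of the guest GPRs. A hedged guest-side sketch
of the convention this series documents -- number in v0 ($2), arguments in
a0-a3, result back in v0; the raw .word encoding of "hypcall 0" is an
assumption for toolchains that cannot assemble the mnemonic:

    static inline unsigned long mips_kvm_hypercall1(unsigned long num,
    						    unsigned long arg0)
    {
    	register unsigned long n asm("$2") = num;	/* v0 */
    	register unsigned long a asm("$4") = arg0;	/* a0 */

    	asm volatile(".word 0x42000028"	/* hypcall 0 (assumed encoding) */
    		     : "+r" (n)
    		     : "r" (a)
    		     : "memory");

    	return n;				/* v0 */
    }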
@@ -484,6 +512,31 @@ static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
 	return ret;
 }
 
+static int kvm_trap_emul_hardware_enable(void)
+{
+	return 0;
+}
+
+static void kvm_trap_emul_hardware_disable(void)
+{
+}
+
+static int kvm_trap_emul_check_extension(struct kvm *kvm, long ext)
+{
+	int r;
+
+	switch (ext) {
+	case KVM_CAP_MIPS_TE:
+		r = 1;
+		break;
+	default:
+		r = 0;
+		break;
+	}
+
+	return r;
+}
+
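The new check_extension callback is what makes capability queries on a VM fd
implementation-specific. A minimal userspace sketch (vm_fd stands in for a
descriptor returned by KVM_CREATE_VM):

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int vm_uses_trap_and_emulate(int vm_fd)
    {
    	/* nonzero for a trap & emulate VM; a VZ VM reports KVM_CAP_MIPS_VZ */
    	return ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MIPS_TE) > 0;
    }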
 static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
 {
 	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
@@ -561,6 +614,9 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
 	u32 config, config1;
 	int vcpu_id = vcpu->vcpu_id;
 
+	/* Start off the timer at 100 MHz */
+	kvm_mips_init_count(vcpu, 100*1000*1000);
+
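The 100 MHz value only seeds the emulated CP0_Count; userspace remains free
to retune it through the one-reg interface. A hedged sketch using the
existing KVM_REG_MIPS_COUNT_HZ register id (vcpu_fd is hypothetical):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int set_count_hz(int vcpu_fd)
    {
    	uint64_t hz = 200 * 1000 * 1000;	/* retune to 200 MHz */
    	struct kvm_one_reg reg = {
    		.id = KVM_REG_MIPS_COUNT_HZ,
    		.addr = (uintptr_t)&hz,
    	};

    	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
    }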
 	/*
 	 * Arch specific stuff, set up config registers properly so that the
 	 * guest will come up as expected
@@ -589,6 +645,13 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
 	/* Read the cache characteristics from the host Config1 Register */
 	config1 = (read_c0_config1() & ~0x7f);
 
+	/* DCache line size not correctly reported in Config1 on Octeon CPUs */
+	if (cpu_dcache_line_size()) {
+		config1 &= ~MIPS_CONF1_DL;
+		config1 |= ((ilog2(cpu_dcache_line_size()) - 1) <<
+			    MIPS_CONF1_DL_SHF) & MIPS_CONF1_DL;
+	}
+
 	/* Set up MMU size */
 	config1 &= ~(0x3f << 25);
 	config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);
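The Config1.DL field encodes the dcache line size as log2(bytes) - 1, which
is why ilog2() needs the new <linux/log2.h> include above. A standalone
illustration of the encoding (plain C, outside the kernel):

    #include <stdio.h>

    int main(void)
    {
    	/* DL = log2(line size) - 1: 32 bytes -> 4, Octeon's 128 -> 6 */
    	for (int size = 16; size <= 128; size <<= 1)
    		printf("%3d-byte line -> Config1.DL = %d\n",
    		       size, __builtin_ctz(size) - 1);
    	return 0;
    }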
@@ -892,10 +955,12 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
 		if (v & CAUSEF_DC) {
 			/* disable timer first */
 			kvm_mips_count_disable_cause(vcpu);
-			kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
+			kvm_change_c0_guest_cause(cop0, (u32)~CAUSEF_DC,
+						  v);
 		} else {
 			/* enable timer last */
-			kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
+			kvm_change_c0_guest_cause(cop0, (u32)~CAUSEF_DC,
+						  v);
 			kvm_mips_count_enable_cause(vcpu);
 		}
 	} else {
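The new (u32) casts keep the complemented mask consistent with the 32-bit
guest Cause accessors: CAUSEF_DC is an unsigned long bit mask (bit 27), so
on 64-bit kernels its complement carries 32 extra set bits. A standalone
illustration of the difference:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	unsigned long causef_dc = 1UL << 27;	/* CAUSEF_DC */

    	/* prints 0xfffffffff7ffffff vs 0xf7ffffff on a 64-bit build */
    	printf("%lx vs %x\n", ~causef_dc, (uint32_t)~causef_dc);
    	return 0;
    }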
@@ -1230,7 +1295,11 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
 	.handle_msa_fpe = kvm_trap_emul_handle_msa_fpe,
 	.handle_fpe = kvm_trap_emul_handle_fpe,
 	.handle_msa_disabled = kvm_trap_emul_handle_msa_disabled,
+	.handle_guest_exit = kvm_trap_emul_no_handler,
 
+	.hardware_enable = kvm_trap_emul_hardware_enable,
+	.hardware_disable = kvm_trap_emul_hardware_disable,
+	.check_extension = kvm_trap_emul_check_extension,
 	.vcpu_init = kvm_trap_emul_vcpu_init,
 	.vcpu_uninit = kvm_trap_emul_vcpu_uninit,
 	.vcpu_setup = kvm_trap_emul_vcpu_setup,
...
This diff is collapsed.
@@ -24,6 +24,7 @@
 /* Cache operations. */
 void (*flush_cache_all)(void);
 void (*__flush_cache_all)(void);
+EXPORT_SYMBOL_GPL(__flush_cache_all);
 void (*flush_cache_mm)(struct mm_struct *mm);
 void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
 	unsigned long end);
...
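The export makes the indirect cache-flush call reachable from GPL modules,
which matters once KVM (and with it the VZ implementation) is built as
kvm.ko. A minimal module-side sketch (the demo names are hypothetical):

    #include <linux/module.h>
    #include <asm/cacheflush.h>

    static int __init flush_demo_init(void)
    {
    	__flush_cache_all();	/* now resolvable thanks to the export */
    	return 0;
    }
    module_init(flush_demo_init);

    MODULE_LICENSE("GPL");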
@@ -348,7 +348,7 @@ void maar_init(void)
 		upper = ((upper & MIPS_MAAR_ADDR) << 4) | 0xffff;
 
 		pr_info("  [%d]: ", i / 2);
-		if (!(attr & MIPS_MAAR_V)) {
+		if (!(attr & MIPS_MAAR_VL)) {
 			pr_cont("disabled\n");
 			continue;
 		}
...
@@ -702,6 +702,10 @@ struct kvm_ppc_resize_hpt {
 #define KVM_VM_PPC_HV 1
 #define KVM_VM_PPC_PR 2
 
+/* on MIPS, 0 forces trap & emulate, 1 forces VZ ASE */
+#define KVM_VM_MIPS_TE		0
+#define KVM_VM_MIPS_VZ		1
+
 #define KVM_S390_SIE_PAGE_OFFSET 1
 
 /*
@@ -883,6 +887,9 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_PPC_MMU_RADIX 134
 #define KVM_CAP_PPC_MMU_HASH_V3 135
 #define KVM_CAP_IMMEDIATE_EXIT 136
+#define KVM_CAP_MIPS_VZ 137
+#define KVM_CAP_MIPS_TE 138
+#define KVM_CAP_MIPS_64BIT 139
 
 #ifdef KVM_CAP_IRQ_ROUTING
...
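Taken together, the machine type and capability numbers give userspace a
complete probe-then-create flow. A hedged sketch (error handling elided,
fd names hypothetical):

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int create_mips_vm(void)
    {
    	int kvm_fd = open("/dev/kvm", O_RDWR);
    	int type = KVM_VM_MIPS_TE;	/* default: trap & emulate */

    	if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MIPS_VZ) > 0)
    		type = KVM_VM_MIPS_VZ;	/* hardware assisted (VZ ASE) */

    	return ioctl(kvm_fd, KVM_CREATE_VM, type);
    }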