Commit 8a5de182 authored by Linus Torvalds

Merge tag 'kvm-arm-for-3.18-take-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm

Pull second batch of changes for KVM/{arm,arm64} from Marc Zyngier:
 "The most obvious thing is the sizeable MMU changes to support 48bit
  VAs on arm64.

  Summary:

   - support for 48bit IPA and VA (EL2)
   - a number of fixes for devices mapped into guests
   - yet another VGIC fix for BE
   - a fix for CPU hotplug
   - a few compile fixes (disabled VGIC, strict mm checks)"

[ I'm pulling directly from Marc at the request of Paolo Bonzini, whose
  backpack was stolen at Düsseldorf airport and who will do new keys and
  rebuild his web of trust.  - Linus ]

* tag 'kvm-arm-for-3.18-take-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm:
  arm/arm64: KVM: Fix BE accesses to GICv2 EISR and ELRSR regs
  arm: kvm: STRICT_MM_TYPECHECKS fix for user_mem_abort
  arm/arm64: KVM: Ensure memslots are within KVM_PHYS_SIZE
  arm64: KVM: Implement 48 VA support for KVM EL2 and Stage-2
  arm/arm64: KVM: map MMIO regions at creation time
  arm64: kvm: define PAGE_S2_DEVICE as read-only by default
  ARM: kvm: define PAGE_S2_DEVICE as read-only by default
  arm/arm64: KVM: add 'writable' parameter to kvm_phys_addr_ioremap
  arm/arm64: KVM: fix potential NULL dereference in user_mem_abort()
  arm/arm64: KVM: use __GFP_ZERO not memset() to get zeroed pages
  ARM: KVM: fix vgic-disabled build
  arm: kvm: fix CPU hotplug
parents 857b50f5 2df36a5d
@@ -37,6 +37,11 @@
  */
 #define TRAMPOLINE_VA UL(CONFIG_VECTORS_BASE)
 
+/*
+ * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation levels.
+ */
+#define KVM_MMU_CACHE_MIN_PAGES 2
+
 #ifndef __ASSEMBLY__
 
 #include <asm/cacheflush.h>
@@ -50,7 +55,7 @@ void free_hyp_pgds(void);
 int kvm_alloc_stage2_pgd(struct kvm *kvm);
 void kvm_free_stage2_pgd(struct kvm *kvm);
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
-                          phys_addr_t pa, unsigned long size);
+                          phys_addr_t pa, unsigned long size, bool writable);
 
 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
@@ -83,6 +88,11 @@ static inline void kvm_clean_pgd(pgd_t *pgd)
         clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
 }
 
+static inline void kvm_clean_pmd(pmd_t *pmd)
+{
+        clean_dcache_area(pmd, PTRS_PER_PMD * sizeof(pmd_t));
+}
+
 static inline void kvm_clean_pmd_entry(pmd_t *pmd)
 {
         clean_pmd_entry(pmd);
@@ -123,10 +133,23 @@ static inline bool kvm_page_empty(void *ptr)
 }
 
-#define kvm_pte_table_empty(ptep) kvm_page_empty(ptep)
-#define kvm_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
-#define kvm_pud_table_empty(pudp) (0)
+#define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
+#define kvm_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp)
+#define kvm_pud_table_empty(kvm, pudp) (0)
+
+#define KVM_PREALLOC_LEVEL 0
+
+static inline int kvm_prealloc_hwpgd(struct kvm *kvm, pgd_t *pgd)
+{
+        return 0;
+}
+
+static inline void kvm_free_hwpgd(struct kvm *kvm) { }
+
+static inline void *kvm_get_hwpgd(struct kvm *kvm)
+{
+        return kvm->arch.pgd;
+}
 
 struct kvm;
...
@@ -100,7 +100,7 @@ extern pgprot_t pgprot_s2_device;
 #define PAGE_HYP _MOD_PROT(pgprot_kernel, L_PTE_HYP)
 #define PAGE_HYP_DEVICE _MOD_PROT(pgprot_hyp_device, L_PTE_HYP)
 #define PAGE_S2 _MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY)
-#define PAGE_S2_DEVICE _MOD_PROT(pgprot_s2_device, L_PTE_S2_RDWR)
+#define PAGE_S2_DEVICE _MOD_PROT(pgprot_s2_device, L_PTE_S2_RDONLY)
 
 #define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
 #define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
...
@@ -409,7 +409,7 @@ static void update_vttbr(struct kvm *kvm)
         kvm_next_vmid++;
 
         /* update vttbr to be used with the new vmid */
-        pgd_phys = virt_to_phys(kvm->arch.pgd);
+        pgd_phys = virt_to_phys(kvm_get_hwpgd(kvm));
         BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK);
         vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK;
         kvm->arch.vttbr = pgd_phys | vmid;
@@ -808,6 +808,7 @@ static int hyp_init_cpu_notify(struct notifier_block *self,
         switch (action) {
         case CPU_STARTING:
         case CPU_STARTING_FROZEN:
+                if (__hyp_get_vectors() == hyp_default_vectors)
                         cpu_init_hyp_mode(NULL);
                 break;
         }
...
@@ -433,10 +433,17 @@ ARM_BE8(rev r10, r10 )
         str r3, [r11, #VGIC_V2_CPU_HCR]
         str r4, [r11, #VGIC_V2_CPU_VMCR]
         str r5, [r11, #VGIC_V2_CPU_MISR]
+#ifdef CONFIG_CPU_ENDIAN_BE8
+        str r6, [r11, #(VGIC_V2_CPU_EISR + 4)]
+        str r7, [r11, #VGIC_V2_CPU_EISR]
+        str r8, [r11, #(VGIC_V2_CPU_ELRSR + 4)]
+        str r9, [r11, #VGIC_V2_CPU_ELRSR]
+#else
         str r6, [r11, #VGIC_V2_CPU_EISR]
         str r7, [r11, #(VGIC_V2_CPU_EISR + 4)]
         str r8, [r11, #VGIC_V2_CPU_ELRSR]
         str r9, [r11, #(VGIC_V2_CPU_ELRSR + 4)]
+#endif
         str r10, [r11, #VGIC_V2_CPU_APR]
 
         /* Clear GICH_HCR */
...
This diff is collapsed.
@@ -41,6 +41,18 @@
  */
 #define TRAMPOLINE_VA (HYP_PAGE_OFFSET_MASK & PAGE_MASK)
 
+/*
+ * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation
+ * levels in addition to the PGD and potentially the PUD which are
+ * pre-allocated (we pre-allocate the fake PGD and the PUD when the Stage-2
+ * tables use one level of tables less than the kernel).
+ */
+#ifdef CONFIG_ARM64_64K_PAGES
+#define KVM_MMU_CACHE_MIN_PAGES 1
+#else
+#define KVM_MMU_CACHE_MIN_PAGES 2
+#endif
+
 #ifdef __ASSEMBLY__
 
 /*
@@ -53,6 +65,7 @@
 #else
 
+#include <asm/pgalloc.h>
 #include <asm/cachetype.h>
 #include <asm/cacheflush.h>
@@ -65,10 +78,6 @@
 #define KVM_PHYS_SIZE (1UL << KVM_PHYS_SHIFT)
 #define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1UL)
 
-/* Make sure we get the right size, and thus the right alignment */
-#define PTRS_PER_S2_PGD (1 << (KVM_PHYS_SHIFT - PGDIR_SHIFT))
-#define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
-
 int create_hyp_mappings(void *from, void *to);
 int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
 void free_boot_hyp_pgd(void);
@@ -77,7 +86,7 @@ void free_hyp_pgds(void);
 int kvm_alloc_stage2_pgd(struct kvm *kvm);
 void kvm_free_stage2_pgd(struct kvm *kvm);
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
-                          phys_addr_t pa, unsigned long size);
+                          phys_addr_t pa, unsigned long size, bool writable);
 
 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
@@ -93,6 +102,7 @@ void kvm_clear_hyp_idmap(void);
 #define kvm_set_pmd(pmdp, pmd) set_pmd(pmdp, pmd)
 
 static inline void kvm_clean_pgd(pgd_t *pgd) {}
+static inline void kvm_clean_pmd(pmd_t *pmd) {}
 static inline void kvm_clean_pmd_entry(pmd_t *pmd) {}
 static inline void kvm_clean_pte(pte_t *pte) {}
 static inline void kvm_clean_pte_entry(pte_t *pte) {}
@@ -111,19 +121,116 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
 #define kvm_pud_addr_end(addr, end) pud_addr_end(addr, end)
 #define kvm_pmd_addr_end(addr, end) pmd_addr_end(addr, end)
 
+/*
+ * In the case where PGDIR_SHIFT is larger than KVM_PHYS_SHIFT, we can address
+ * the entire IPA input range with a single pgd entry, and we would only need
+ * one pgd entry.  Note that in this case, the pgd is actually not used by
+ * the MMU for Stage-2 translations, but is merely a fake pgd used as a data
+ * structure for the kernel pgtable macros to work.
+ */
+#if PGDIR_SHIFT > KVM_PHYS_SHIFT
+#define PTRS_PER_S2_PGD_SHIFT 0
+#else
+#define PTRS_PER_S2_PGD_SHIFT (KVM_PHYS_SHIFT - PGDIR_SHIFT)
+#endif
+#define PTRS_PER_S2_PGD (1 << PTRS_PER_S2_PGD_SHIFT)
+#define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
+
+/*
+ * If we are concatenating first level stage-2 page tables, we would have less
+ * than or equal to 16 pointers in the fake PGD, because that's what the
+ * architecture allows.  In this case, (4 - CONFIG_ARM64_PGTABLE_LEVELS)
+ * represents the first level for the host, and we add 1 to go to the next
+ * level (which uses concatenation) for the stage-2 tables.
+ */
+#if PTRS_PER_S2_PGD <= 16
+#define KVM_PREALLOC_LEVEL (4 - CONFIG_ARM64_PGTABLE_LEVELS + 1)
+#else
+#define KVM_PREALLOC_LEVEL (0)
+#endif
+
+/**
+ * kvm_prealloc_hwpgd - allocate initial table for VTTBR
+ * @kvm:  The KVM struct pointer for the VM.
+ * @pgd:  The kernel pseudo pgd
+ *
+ * When the kernel uses more levels of page tables than the guest, we allocate
+ * a fake PGD and pre-populate it to point to the next-level page table, which
+ * will be the real initial page table pointed to by the VTTBR.
+ *
+ * When KVM_PREALLOC_LEVEL==2, we allocate a single page for the PMD and
+ * the kernel will use folded pud.  When KVM_PREALLOC_LEVEL==1, we
+ * allocate 2 consecutive PUD pages.
+ */
+static inline int kvm_prealloc_hwpgd(struct kvm *kvm, pgd_t *pgd)
+{
+        unsigned int i;
+        unsigned long hwpgd;
+
+        if (KVM_PREALLOC_LEVEL == 0)
+                return 0;
+
+        hwpgd = __get_free_pages(GFP_KERNEL | __GFP_ZERO, PTRS_PER_S2_PGD_SHIFT);
+        if (!hwpgd)
+                return -ENOMEM;
+
+        for (i = 0; i < PTRS_PER_S2_PGD; i++) {
+                if (KVM_PREALLOC_LEVEL == 1)
+                        pgd_populate(NULL, pgd + i,
+                                     (pud_t *)hwpgd + i * PTRS_PER_PUD);
+                else if (KVM_PREALLOC_LEVEL == 2)
+                        pud_populate(NULL, pud_offset(pgd, 0) + i,
+                                     (pmd_t *)hwpgd + i * PTRS_PER_PMD);
+        }
+
+        return 0;
+}
+
+static inline void *kvm_get_hwpgd(struct kvm *kvm)
+{
+        pgd_t *pgd = kvm->arch.pgd;
+        pud_t *pud;
+
+        if (KVM_PREALLOC_LEVEL == 0)
+                return pgd;
+
+        pud = pud_offset(pgd, 0);
+        if (KVM_PREALLOC_LEVEL == 1)
+                return pud;
+
+        BUG_ON(KVM_PREALLOC_LEVEL != 2);
+        return pmd_offset(pud, 0);
+}
+
+static inline void kvm_free_hwpgd(struct kvm *kvm)
+{
+        if (KVM_PREALLOC_LEVEL > 0) {
+                unsigned long hwpgd = (unsigned long)kvm_get_hwpgd(kvm);
+                free_pages(hwpgd, PTRS_PER_S2_PGD_SHIFT);
+        }
+}
+
 static inline bool kvm_page_empty(void *ptr)
 {
         struct page *ptr_page = virt_to_page(ptr);
         return page_count(ptr_page) == 1;
 }
 
-#define kvm_pte_table_empty(ptep) kvm_page_empty(ptep)
-#ifndef CONFIG_ARM64_64K_PAGES
-#define kvm_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
-#else
-#define kvm_pmd_table_empty(pmdp) (0)
-#endif
-#define kvm_pud_table_empty(pudp) (0)
+#define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
+
+#ifdef __PAGETABLE_PMD_FOLDED
+#define kvm_pmd_table_empty(kvm, pmdp) (0)
+#else
+#define kvm_pmd_table_empty(kvm, pmdp) \
+        (kvm_page_empty(pmdp) && (!(kvm) || KVM_PREALLOC_LEVEL < 2))
+#endif
+
+#ifdef __PAGETABLE_PUD_FOLDED
+#define kvm_pud_table_empty(kvm, pudp) (0)
+#else
+#define kvm_pud_table_empty(kvm, pudp) \
+        (kvm_page_empty(pudp) && (!(kvm) || KVM_PREALLOC_LEVEL < 1))
+#endif
 
 struct kvm;
...
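To make the arithmetic behind PTRS_PER_S2_PGD and KVM_PREALLOC_LEVEL above concrete, here is a small standalone sketch (not kernel code). It assumes one common configuration: 4K pages with a 48-bit host VA, i.e. four translation levels and PGDIR_SHIFT == 39, together with the usual 40-bit IPA (KVM_PHYS_SHIFT == 40). Those three constants are restated locally as assumptions purely for illustration; the kernel takes them from its own headers.

/*
 * Standalone sketch (not kernel code) mirroring the stage-2 fake-PGD
 * arithmetic for one assumed configuration: 4K pages, 48-bit host VA
 * (4 levels, PGDIR_SHIFT 39), 40-bit IPA (KVM_PHYS_SHIFT 40).
 */
#include <assert.h>
#include <stdio.h>

#define KVM_PHYS_SHIFT          40  /* assumed 40-bit IPA              */
#define PGDIR_SHIFT             39  /* assumed: 4K pages, four levels  */
#define ARM64_PGTABLE_LEVELS     4  /* assumed host configuration      */

#if PGDIR_SHIFT > KVM_PHYS_SHIFT
#define PTRS_PER_S2_PGD_SHIFT   0
#else
#define PTRS_PER_S2_PGD_SHIFT   (KVM_PHYS_SHIFT - PGDIR_SHIFT)
#endif
#define PTRS_PER_S2_PGD         (1 << PTRS_PER_S2_PGD_SHIFT)

#if PTRS_PER_S2_PGD <= 16
#define KVM_PREALLOC_LEVEL      (4 - ARM64_PGTABLE_LEVELS + 1)
#else
#define KVM_PREALLOC_LEVEL      (0)
#endif

int main(void)
{
        /* 40 - 39 == 1, so the fake PGD has 2^1 == 2 entries. */
        assert(PTRS_PER_S2_PGD == 2);

        /*
         * 2 <= 16, so the first stage-2 level is concatenated and the level
         * below the host PGD (the PUD level) is pre-allocated:
         * KVM_PREALLOC_LEVEL == 4 - 4 + 1 == 1.  kvm_prealloc_hwpgd() would
         * then grab 1 << PTRS_PER_S2_PGD_SHIFT == 2 consecutive PUD pages.
         */
        assert(KVM_PREALLOC_LEVEL == 1);

        printf("PTRS_PER_S2_PGD=%d KVM_PREALLOC_LEVEL=%d\n",
               PTRS_PER_S2_PGD, KVM_PREALLOC_LEVEL);
        return 0;
}

Under these assumptions the two concatenated PUD pages, not the two-entry fake PGD, form the real first-level Stage-2 table, which is what update_vttbr() now points the VTTBR at via kvm_get_hwpgd().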
@@ -79,7 +79,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 #define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
 
 #define PAGE_S2 __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
-#define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDWR | PTE_UXN)
+#define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)
 
 #define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
 #define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
...
@@ -67,10 +67,14 @@ CPU_BE( rev w11, w11 )
         str w4, [x3, #VGIC_V2_CPU_HCR]
         str w5, [x3, #VGIC_V2_CPU_VMCR]
         str w6, [x3, #VGIC_V2_CPU_MISR]
-        str w7, [x3, #VGIC_V2_CPU_EISR]
-        str w8, [x3, #(VGIC_V2_CPU_EISR + 4)]
-        str w9, [x3, #VGIC_V2_CPU_ELRSR]
-        str w10, [x3, #(VGIC_V2_CPU_ELRSR + 4)]
+CPU_LE( str w7, [x3, #VGIC_V2_CPU_EISR] )
+CPU_LE( str w8, [x3, #(VGIC_V2_CPU_EISR + 4)] )
+CPU_LE( str w9, [x3, #VGIC_V2_CPU_ELRSR] )
+CPU_LE( str w10, [x3, #(VGIC_V2_CPU_ELRSR + 4)] )
+CPU_BE( str w7, [x3, #(VGIC_V2_CPU_EISR + 4)] )
+CPU_BE( str w8, [x3, #VGIC_V2_CPU_EISR] )
+CPU_BE( str w9, [x3, #(VGIC_V2_CPU_ELRSR + 4)] )
+CPU_BE( str w10, [x3, #VGIC_V2_CPU_ELRSR] )
         str w11, [x3, #VGIC_V2_CPU_APR]
 
         /* Clear GICH_HCR */
...
@@ -219,8 +219,8 @@ struct vgic_v2_cpu_if {
         u32 vgic_hcr;
         u32 vgic_vmcr;
         u32 vgic_misr;      /* Saved only */
-        u32 vgic_eisr[2];   /* Saved only */
-        u32 vgic_elrsr[2];  /* Saved only */
+        u64 vgic_eisr;      /* Saved only */
+        u64 vgic_elrsr;     /* Saved only */
         u32 vgic_apr;
         u32 vgic_lr[VGIC_V2_MAX_LRS];
 };
@@ -331,6 +331,14 @@ static inline int kvm_vgic_create(struct kvm *kvm)
         return 0;
 }
 
+static inline void kvm_vgic_destroy(struct kvm *kvm)
+{
+}
+
+static inline void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
+{
+}
+
 static inline int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
 {
         return 0;
...
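The save sequences above store the two 32-bit GICH_EISR/GICH_ELRSR words at swapped offsets when running big-endian, so that bit n of the new u64 vgic_eisr/vgic_elrsr fields always corresponds to list register n. The following standalone C sketch walks through that layout argument; it is not kernel code, and the register values and the runtime endianness probe are made up for illustration.

/*
 * Standalone sketch (not kernel code): why the two 32-bit status words are
 * stored at swapped offsets on big-endian.  In a u64, bits 0-31 live at byte
 * offset 4 on a BE CPU, so the word covering LRs 0-31 must be written there.
 */
#include <assert.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
        /* Pretend hardware values: LR 1 set in word 0, LR 33 set in word 1. */
        uint32_t eisr0 = 1u << 1;          /* covers LRs 0-31  */
        uint32_t eisr1 = 1u << (33 - 32);  /* covers LRs 32-63 */
        uint64_t combined = 0;
        uint32_t words[2];
        int big_endian;

        /* Detect the endianness this sketch is running on. */
        uint16_t probe = 1;
        big_endian = (*(uint8_t *)&probe == 0);

        /*
         * Mimic the stores: on LE, word 0 goes at offset 0 and word 1 at
         * offset 4; on BE the offsets are swapped, as in the CPU_LE()/CPU_BE()
         * and #ifdef CONFIG_CPU_ENDIAN_BE8 variants above.
         */
        words[big_endian ? 1 : 0] = eisr0;
        words[big_endian ? 0 : 1] = eisr1;
        memcpy(&combined, words, sizeof(combined));

        /* Either way, bit n of the u64 now refers to list register n. */
        assert(combined == ((1ULL << 1) | (1ULL << 33)));
        printf("combined = %#llx\n", (unsigned long long)combined);
        return 0;
}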
@@ -71,35 +71,17 @@ static void vgic_v2_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
                                   struct vgic_lr lr_desc)
 {
         if (!(lr_desc.state & LR_STATE_MASK))
-                __set_bit(lr, (unsigned long *)vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr);
+                vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr |= (1ULL << lr);
 }
 
 static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu)
 {
-        u64 val;
-
-#if BITS_PER_LONG == 64
-        val = vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr[1];
-        val <<= 32;
-        val |= vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr[0];
-#else
-        val = *(u64 *)vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr;
-#endif
-        return val;
+        return vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr;
 }
 
 static u64 vgic_v2_get_eisr(const struct kvm_vcpu *vcpu)
 {
-        u64 val;
-
-#if BITS_PER_LONG == 64
-        val = vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr[1];
-        val <<= 32;
-        val |= vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr[0];
-#else
-        val = *(u64 *)vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr;
-#endif
-        return val;
+        return vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr;
 }
 
 static u32 vgic_v2_get_interrupt_status(const struct kvm_vcpu *vcpu)
...
@@ -145,6 +145,20 @@ static void vgic_free_bitmap(struct vgic_bitmap *b)
         b->shared = NULL;
 }
 
+/*
+ * Call this function to convert a u64 value to an unsigned long * bitmask
+ * in a way that works on both 32-bit and 64-bit LE and BE platforms.
+ *
+ * Warning: Calling this function may modify *val.
+ */
+static unsigned long *u64_to_bitmask(u64 *val)
+{
+#if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 32
+        *val = (*val >> 32) | (*val << 32);
+#endif
+        return (unsigned long *)val;
+}
+
 static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x,
                                 int cpuid, u32 offset)
 {
@@ -1442,7 +1456,7 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
          * active bit.
          */
         u64 eisr = vgic_get_eisr(vcpu);
-        unsigned long *eisr_ptr = (unsigned long *)&eisr;
+        unsigned long *eisr_ptr = u64_to_bitmask(&eisr);
         int lr;
 
         for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) {
@@ -1505,7 +1519,7 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
         level_pending = vgic_process_maintenance(vcpu);
         elrsr = vgic_get_elrsr(vcpu);
-        elrsr_ptr = (unsigned long *)&elrsr;
+        elrsr_ptr = u64_to_bitmask(&elrsr);
 
         /* Clear mappings for empty LRs */
         for_each_set_bit(lr, elrsr_ptr, vgic->nr_lr) {
@@ -1899,7 +1913,8 @@ int kvm_vgic_init(struct kvm *kvm)
         }
 
         ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base,
-                                    vgic->vcpu_base, KVM_VGIC_V2_CPU_SIZE);
+                                    vgic->vcpu_base, KVM_VGIC_V2_CPU_SIZE,
+                                    true);
         if (ret) {
                 kvm_err("Unable to remap VGIC CPU to VCPU\n");
                 goto out;
...
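u64_to_bitmask() above covers the remaining case, 32-bit big-endian hosts: for_each_set_bit() walks the value as 32-bit words starting from the lower address, and on a big-endian CPU that word holds bits 32-63 of the u64, so every bit index would come out off by 32 without the swap. The following standalone sketch (not kernel code) simulates the 32-bit big-endian layout with a plain array so it runs anywhere.

/*
 * Standalone sketch (not kernel code) of the problem u64_to_bitmask() solves.
 * The 32-bit big-endian memory layout of a u64 is simulated explicitly.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Lowest set bit in a simulated "unsigned long[2]" bitmap of 32-bit words. */
static int find_first_set(const uint32_t words[2])
{
        for (int i = 0; i < 64; i++)
                if (words[i / 32] & (1u << (i % 32)))
                        return i;
        return -1;
}

int main(void)
{
        uint64_t elrsr = 1ULL << 3;  /* pretend only list register 3 is empty */

        /* Memory image of 'elrsr' on a 32-bit big-endian machine. */
        uint32_t be_words[2] = {
                (uint32_t)(elrsr >> 32),        /* lower address: bits 32-63 */
                (uint32_t)(elrsr & 0xffffffffu) /* higher address: bits 0-31 */
        };

        /* Without the fixup, the reported bit index is off by 32. */
        assert(find_first_set(be_words) == 3 + 32);

        /* u64_to_bitmask() swaps the two halves before the walk... */
        uint64_t swapped = (elrsr >> 32) | (elrsr << 32);
        uint32_t fixed_words[2] = {
                (uint32_t)(swapped >> 32),
                (uint32_t)(swapped & 0xffffffffu)
        };

        /* ...so the scan now reports the right list register. */
        assert(find_first_set(fixed_words) == 3);
        printf("ok\n");
        return 0;
}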