Commit d99bb72a authored by Juergen Gross, committed by Boris Ostrovsky

x86/xen: remove 32-bit pv leftovers

There are some remaining 32-bit pv-guest support leftovers in the Xen
hypercall interface. Remove them.
Signed-off-by: Juergen Gross <jgross@suse.com>
Link: https://lore.kernel.org/r/20211028081221.2475-2-jgross@suse.com
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
parent a67efff2
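
For context before the diff: on a 32-bit PV guest, sizeof(long) == 4, so every 64-bit hypercall argument had to be passed as two 32-bit slots (low word, then high word), while on 64-bit each argument fits one slot. A minimal sketch of the two conventions, using hypothetical helper names around the _hypercall2()/_hypercall4() macros that appear in the diff:

/*
 * Sketch only; helper names are hypothetical. The removed branches
 * implemented the 32-bit case, where a u64 occupies two argument slots.
 */
static inline int update_descriptor_compat32(u64 ma, u64 desc)
{
	/* low word, then high word, for each 64-bit value */
	return _hypercall4(int, update_descriptor,
			   ma, ma >> 32, desc, desc >> 32);
}

static inline int update_descriptor_64(u64 ma, u64 desc)
{
	/* sizeof(long) == sizeof(u64): one slot per argument */
	return _hypercall2(int, update_descriptor, ma, desc);
}

With 32-bit PV support gone, only the second form is needed, which is exactly what the hunks below reduce each function to.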
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -323,9 +323,7 @@ HYPERVISOR_get_debugreg(int reg)
 static inline int
 HYPERVISOR_update_descriptor(u64 ma, u64 desc)
 {
-	if (sizeof(u64) == sizeof(long))
-		return _hypercall2(int, update_descriptor, ma, desc);
-	return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32);
+	return _hypercall2(int, update_descriptor, ma, desc);
 }
 
 static inline long
@@ -344,12 +342,7 @@ static inline int
 HYPERVISOR_update_va_mapping(unsigned long va, pte_t new_val,
			      unsigned long flags)
 {
-	if (sizeof(new_val) == sizeof(long))
-		return _hypercall3(int, update_va_mapping, va,
-				   new_val.pte, flags);
-	else
-		return _hypercall4(int, update_va_mapping, va,
-				   new_val.pte, new_val.pte >> 32, flags);
+	return _hypercall3(int, update_va_mapping, va, new_val.pte, flags);
 }
 
 static inline int
@@ -461,16 +454,10 @@ MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va,
 {
 	mcl->op = __HYPERVISOR_update_va_mapping;
 	mcl->args[0] = va;
-	if (sizeof(new_val) == sizeof(long)) {
-		mcl->args[1] = new_val.pte;
-		mcl->args[2] = flags;
-	} else {
-		mcl->args[1] = new_val.pte;
-		mcl->args[2] = new_val.pte >> 32;
-		mcl->args[3] = flags;
-	}
+	mcl->args[1] = new_val.pte;
+	mcl->args[2] = flags;
 
-	trace_xen_mc_entry(mcl, sizeof(new_val) == sizeof(long) ? 3 : 4);
+	trace_xen_mc_entry(mcl, 3);
 }
 
 static inline void
@@ -478,19 +465,10 @@ MULTI_update_descriptor(struct multicall_entry *mcl, u64 maddr,
			 struct desc_struct desc)
 {
 	mcl->op = __HYPERVISOR_update_descriptor;
-	if (sizeof(maddr) == sizeof(long)) {
-		mcl->args[0] = maddr;
-		mcl->args[1] = *(unsigned long *)&desc;
-	} else {
-		u32 *p = (u32 *)&desc;
-
-		mcl->args[0] = maddr;
-		mcl->args[1] = maddr >> 32;
-		mcl->args[2] = *p++;
-		mcl->args[3] = *p;
-	}
+	mcl->args[0] = maddr;
+	mcl->args[1] = *(unsigned long *)&desc;
 
-	trace_xen_mc_entry(mcl, sizeof(maddr) == sizeof(long) ? 2 : 4);
+	trace_xen_mc_entry(mcl, 2);
 }
 
 static inline void
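
A side note on the trace_xen_mc_entry() argument counts above: with the 32-bit path gone, every value fits a single 64-bit args[] slot, so update_va_mapping always fills exactly three slots and update_descriptor two. A self-contained sketch of the resulting entry layout (simplified types; the op number is an assumption taken from Xen's public headers, not from this commit):

#include <stdint.h>

/* Simplified stand-in for Xen's struct multicall_entry. */
struct multicall_entry {
	uint64_t op;
	int64_t  result;
	uint64_t args[6];
};

/* Assumed value of __HYPERVISOR_update_va_mapping from the Xen ABI. */
#define XEN_OP_UPDATE_VA_MAPPING 14

static void fill_update_va_mapping(struct multicall_entry *mcl,
				   uint64_t va, uint64_t pte, uint64_t flags)
{
	mcl->op      = XEN_OP_UPDATE_VA_MAPPING;
	mcl->args[0] = va;	/* virtual address being remapped */
	mcl->args[1] = pte;	/* full 64-bit PTE in one slot */
	mcl->args[2] = flags;	/* UVMF_* flush flags */
	/* args[3..5] unused: three slots, matching trace_xen_mc_entry(mcl, 3) */
}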
--- a/drivers/xen/mem-reservation.c
+++ b/drivers/xen/mem-reservation.c
@@ -35,6 +35,7 @@ void __xenmem_reservation_va_mapping_update(unsigned long count,
 	for (i = 0; i < count; i++) {
 		struct page *page = pages[i];
 		unsigned long pfn = page_to_pfn(page);
+		int ret;
 
 		BUG_ON(!page);
 
@@ -46,16 +47,10 @@ void __xenmem_reservation_va_mapping_update(unsigned long count,
 
 		set_phys_to_machine(pfn, frames[i]);
 
-		/* Link back into the page tables if not highmem. */
-		if (!PageHighMem(page)) {
-			int ret;
-
-			ret = HYPERVISOR_update_va_mapping(
-					(unsigned long)__va(pfn << PAGE_SHIFT),
-					mfn_pte(frames[i], PAGE_KERNEL),
-					0);
-			BUG_ON(ret);
-		}
+		ret = HYPERVISOR_update_va_mapping(
+				(unsigned long)__va(pfn << PAGE_SHIFT),
+				mfn_pte(frames[i], PAGE_KERNEL), 0);
+		BUG_ON(ret);
 	}
 }
 EXPORT_SYMBOL_GPL(__xenmem_reservation_va_mapping_update);
@@ -68,6 +63,7 @@ void __xenmem_reservation_va_mapping_reset(unsigned long count,
 	for (i = 0; i < count; i++) {
 		struct page *page = pages[i];
 		unsigned long pfn = page_to_pfn(page);
+		int ret;
 
 		/*
 		 * We don't support PV MMU when Linux and Xen are using
@@ -75,14 +71,11 @@ void __xenmem_reservation_va_mapping_reset(unsigned long count,
 		 */
 		BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
 
-		if (!PageHighMem(page)) {
-			int ret;
-
-			ret = HYPERVISOR_update_va_mapping(
-					(unsigned long)__va(pfn << PAGE_SHIFT),
-					__pte_ma(0), 0);
-			BUG_ON(ret);
-		}
+		ret = HYPERVISOR_update_va_mapping(
+				(unsigned long)__va(pfn << PAGE_SHIFT),
+				__pte_ma(0), 0);
+		BUG_ON(ret);
+
 		__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
 	}
 }
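
The dropped PageHighMem() checks were dead code once 32-bit PV guests were gone: highmem exists only on 32-bit kernels, while on x86-64 every page frame has a permanent address in the direct map, so the __va(pfn << PAGE_SHIFT) translation above is always valid. A standalone sketch of that translation (the direct-map base is an assumed illustrative value, not taken from this commit):

#include <stdint.h>

#define PAGE_SHIFT	12
/* Assumed x86-64 direct-map base, standing in for the kernel's PAGE_OFFSET. */
#define DIRECT_MAP_BASE	0xffff888000000000ULL

/* Simplified stand-in for __va(pfn << PAGE_SHIFT): pfn -> kernel virtual address. */
static inline void *pfn_to_direct_map_va(uint64_t pfn)
{
	return (void *)(DIRECT_MAP_BASE + (pfn << PAGE_SHIFT));
}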