Commit 0cacb80b authored by Sean Christopherson, committed by Paolo Bonzini

KVM: VMX: Macrofy the MSR bitmap getters and setters

Add builder macros to generate the MSR bitmap helpers to reduce the
amount of copy-paste code, especially with respect to all the magic
numbers needed to calc the correct bit location.

No functional change intended.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20211109013047.2041518-4-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 67f4b996
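
The "magic numbers" the changelog refers to are the byte offsets of the read and write halves of the 4 KiB MSR bitmap. Below is a minimal, standalone sketch of the mapping the helpers encode; the function name is made up for illustration and is not part of the patch, and MSR_EFER is used purely as an example input.

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Illustration only: locate the read-intercept bit for an MSR, following
     * the layout described in the comment added by this patch (low-range
     * reads at byte 0x000, high-range reads at byte 0x400).
     */
    static bool msr_read_bit_location(uint32_t msr, uint32_t *byte, uint32_t *bit)
    {
    	uint32_t base;

    	if (msr <= 0x1fff)
    		base = 0x000;		/* MSRs 0x00000000 - 0x00001fff */
    	else if (msr >= 0xc0000000 && msr <= 0xc0001fff)
    		base = 0x400;		/* MSRs 0xc0000000 - 0xc0001fff */
    	else
    		return false;		/* not covered: always VM-Exits */

    	*byte = base + (msr & 0x1fff) / 8;
    	*bit  = (msr & 0x1fff) % 8;
    	return true;
    }

    /* Example: MSR_EFER (0xc0000080) -> byte 0x410, bit 0 of the bitmap. */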
@@ -400,68 +400,33 @@ static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
 void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu);
 
-static inline bool vmx_test_msr_bitmap_read(ulong *msr_bitmap, u32 msr)
-{
-	int f = sizeof(unsigned long);
-
-	if (msr <= 0x1fff)
-		return test_bit(msr, msr_bitmap + 0x000 / f);
-	else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))
-		return test_bit(msr & 0x1fff, msr_bitmap + 0x400 / f);
-	return true;
-}
-
-static inline bool vmx_test_msr_bitmap_write(ulong *msr_bitmap, u32 msr)
-{
-	int f = sizeof(unsigned long);
-
-	if (msr <= 0x1fff)
-		return test_bit(msr, msr_bitmap + 0x800 / f);
-	else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))
-		return test_bit(msr & 0x1fff, msr_bitmap + 0xc00 / f);
-	return true;
-}
-
-static inline void vmx_clear_msr_bitmap_read(ulong *msr_bitmap, u32 msr)
-{
-	int f = sizeof(unsigned long);
-
-	if (msr <= 0x1fff)
-		__clear_bit(msr, msr_bitmap + 0x000 / f);
-	else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))
-		__clear_bit(msr & 0x1fff, msr_bitmap + 0x400 / f);
-}
-
-static inline void vmx_clear_msr_bitmap_write(ulong *msr_bitmap, u32 msr)
-{
-	int f = sizeof(unsigned long);
-
-	if (msr <= 0x1fff)
-		__clear_bit(msr, msr_bitmap + 0x800 / f);
-	else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))
-		__clear_bit(msr & 0x1fff, msr_bitmap + 0xc00 / f);
-}
-
-static inline void vmx_set_msr_bitmap_read(ulong *msr_bitmap, u32 msr)
-{
-	int f = sizeof(unsigned long);
-
-	if (msr <= 0x1fff)
-		__set_bit(msr, msr_bitmap + 0x000 / f);
-	else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))
-		__set_bit(msr & 0x1fff, msr_bitmap + 0x400 / f);
-}
-
-static inline void vmx_set_msr_bitmap_write(ulong *msr_bitmap, u32 msr)
-{
-	int f = sizeof(unsigned long);
-
-	if (msr <= 0x1fff)
-		__set_bit(msr, msr_bitmap + 0x800 / f);
-	else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))
-		__set_bit(msr & 0x1fff, msr_bitmap + 0xc00 / f);
-}
+/*
+ * Note, early Intel manuals have the write-low and read-high bitmap offsets
+ * the wrong way round.  The bitmaps control MSRs 0x00000000-0x00001fff and
+ * 0xc0000000-0xc0001fff.  The former (low) uses bytes 0-0x3ff for reads and
+ * 0x800-0xbff for writes.  The latter (high) uses 0x400-0x7ff for reads and
+ * 0xc00-0xfff for writes.  MSRs not covered by either of the ranges always
+ * VM-Exit.
+ */
+#define __BUILD_VMX_MSR_BITMAP_HELPER(rtype, action, bitop, access, base)      \
+static inline rtype vmx_##action##_msr_bitmap_##access(unsigned long *bitmap,  \
+						       u32 msr)                 \
+{                                                                               \
+	int f = sizeof(unsigned long);                                          \
+                                                                                \
+	if (msr <= 0x1fff)                                                      \
+		return bitop##_bit(msr, bitmap + base / f);                     \
+	else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))                    \
+		return bitop##_bit(msr & 0x1fff, bitmap + (base + 0x400) / f);  \
+	return (rtype)true;                                                     \
+}
+#define BUILD_VMX_MSR_BITMAP_HELPERS(ret_type, action, bitop)                  \
+	__BUILD_VMX_MSR_BITMAP_HELPER(ret_type, action, bitop, read,  0x0)      \
+	__BUILD_VMX_MSR_BITMAP_HELPER(ret_type, action, bitop, write, 0x800)
+
+BUILD_VMX_MSR_BITMAP_HELPERS(bool, test, test)
+BUILD_VMX_MSR_BITMAP_HELPERS(void, clear, __clear)
+BUILD_VMX_MSR_BITMAP_HELPERS(void, set, __set)
 
 static inline u8 vmx_get_rvi(void)
 {
...
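
For reference, this is roughly what the preprocessor generates from BUILD_VMX_MSR_BITMAP_HELPERS(bool, test, test) for the write half (base = 0x800). The expansion below is an illustration derived from the macro above, not a separate hunk of the patch; note that (0x800 + 0x400) / f is the same 0xc00 offset the removed open-coded vmx_test_msr_bitmap_write() used, so the generated body is equivalent.

    /* Approximate expansion of the "test"/"write" helper (base = 0x800). */
    static inline bool vmx_test_msr_bitmap_write(unsigned long *bitmap, u32 msr)
    {
    	int f = sizeof(unsigned long);

    	if (msr <= 0x1fff)
    		return test_bit(msr, bitmap + 0x800 / f);
    	else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))
    		return test_bit(msr & 0x1fff, bitmap + (0x800 + 0x400) / f);
    	return (bool)true;
    }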