Commit 4cbc418a authored by Paolo Bonzini

Merge branch 'cve-2019-3016' into kvm-next-5.6

From Boris Ostrovsky:

The KVM hypervisor may provide a guest with the ability to defer a remote TLB
flush when the remote vCPU is not running. When this feature is used, the TLB
flush will happen only when the remote vCPU is scheduled to run again. This
avoids unnecessary (and expensive) IPIs.

Under certain circumstances, when a guest initiates such a deferred action,
the hypervisor may miss the request. It is also possible that the guest may
mistakenly assume that it has already marked a remote vCPU as needing a flush
when in fact that request had already been processed by the hypervisor. In
both cases this will result in an invalid translation being present in a
vCPU, potentially allowing accesses to memory locations in that guest's
address space that should not be accessible.

Note that only intra-guest memory is vulnerable.
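To make the handshake concrete, here is a minimal user-space model of the
preempted-flag protocol that the series hardens. It is a sketch only: C11
atomics stand in for the kernel's xchg()/cmpxchg(), the two flag bits match
the KVM steal-time ABI, and everything else is illustrative.

```c
#include <stdatomic.h>
#include <stdio.h>

#define KVM_VCPU_PREEMPTED (1 << 0) /* host: vCPU is scheduled out */
#define KVM_VCPU_FLUSH_TLB (1 << 1) /* guest: flush before running it again */

/* Models the preempted byte of struct kvm_steal_time in shared memory. */
static _Atomic unsigned char preempted;

/* Guest side: defer a remote flush only while the target vCPU is preempted. */
static int guest_defer_flush(void)
{
	unsigned char old = atomic_load(&preempted);

	while (old & KVM_VCPU_PREEMPTED) {
		/*
		 * CAS, so we never set FLUSH_TLB after the host has already
		 * cleared PREEMPTED; a plain store here is exactly the
		 * lost-request bug described above.
		 */
		if (atomic_compare_exchange_weak(&preempted, &old,
						 old | KVM_VCPU_FLUSH_TLB))
			return 1;	/* flush deferred to the hypervisor */
	}
	return 0;	/* vCPU is running again: must send an IPI instead */
}

/* Host side, before the vCPU runs: consume both bits in one exchange. */
static void host_record_steal_time(void)
{
	if (atomic_exchange(&preempted, 0) & KVM_VCPU_FLUSH_TLB)
		puts("flushing guest TLB before entry");
}

int main(void)
{
	atomic_store(&preempted, KVM_VCPU_PREEMPTED); /* vCPU scheduled out */
	if (guest_defer_flush())
		host_record_steal_time();
	return 0;
}
```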

The five patches address both of these problems:
1. The first patch makes sure the hypervisor doesn't accidentally clear
   a guest's remote flush request.
2. The rest of the patches prevent the race between the hypervisor
   acknowledging a remote flush request and the guest issuing a new one.

Conflicts:
	arch/x86/kvm/x86.c [move from kvm_arch_vcpu_free to kvm_arch_vcpu_destroy]
parents 1d5920c3 a6bd811f
@@ -111,7 +111,7 @@ patternProperties:
       spi-rx-bus-width:
         allOf:
           - $ref: /schemas/types.yaml#/definitions/uint32
-          - enum: [ 1, 2, 4 ]
+          - enum: [ 1, 2, 4, 8 ]
           - default: 1
         description:
           Bus width to the SPI bus used for MISO.
@@ -123,7 +123,7 @@ patternProperties:
       spi-tx-bus-width:
         allOf:
           - $ref: /schemas/types.yaml#/definitions/uint32
-          - enum: [ 1, 2, 4 ]
+          - enum: [ 1, 2, 4, 8 ]
           - default: 1
         description:
           Bus width to the SPI bus used for MOSI.
......
@@ -162,7 +162,7 @@
 #endif
 #ifdef CONFIG_ARC_HAS_ACCL_REGS
-	ST2	r58, r59, PT_sp + 12
+	ST2	r58, r59, PT_r58
 #endif
 .endm
@@ -172,8 +172,8 @@
 	LD2	gp, fp, PT_r26		; gp (r26), fp (r27)
-	ld	r12, [sp, PT_sp + 4]
-	ld	r30, [sp, PT_sp + 8]
+	ld	r12, [sp, PT_r12]
+	ld	r30, [sp, PT_r30]
 	; Restore SP (into AUX_USER_SP) only if returning to U mode
 	;  - for K mode, it will be implicitly restored as stack is unwound
@@ -190,7 +190,7 @@
 #endif
 #ifdef CONFIG_ARC_HAS_ACCL_REGS
-	LD2	r58, r59, PT_sp + 12
+	LD2	r58, r59, PT_r58
 #endif
 .endm
......
@@ -8,7 +8,6 @@
 #define _ASM_ARC_HUGEPAGE_H
 #include <linux/types.h>
-#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 static inline pte_t pmd_pte(pmd_t pmd)
......
@@ -66,7 +66,15 @@ int main(void)
 	DEFINE(SZ_CALLEE_REGS, sizeof(struct callee_regs));
 	DEFINE(SZ_PT_REGS, sizeof(struct pt_regs));
 	DEFINE(PT_user_r25, offsetof(struct pt_regs, user_r25));
+#ifdef CONFIG_ISA_ARCV2
+	OFFSET(PT_r12, pt_regs, r12);
+	OFFSET(PT_r30, pt_regs, r30);
+#endif
+#ifdef CONFIG_ARC_HAS_ACCL_REGS
+	OFFSET(PT_r58, pt_regs, r58);
+	OFFSET(PT_r59, pt_regs, r59);
+#endif
 	return 0;
 }
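For readers unfamiliar with the asm-offsets mechanism used above: the C file
is compiled with `gcc -S` and never executed; each DEFINE()/OFFSET() plants a
magic string in the assembly output, which the build turns into a generated
header that the assembly entry code can include. A hypothetical, cut-down
illustration of the trick (the marker format follows the kernel's Kbuild
convention; names are illustrative):

```c
#include <stddef.h>

/*
 * Emit "->SYM value" into the generated .s file; Kbuild's sed script
 * rewrites each such line into "#define SYM value".
 */
#define DEFINE(sym, val) \
	asm volatile("\n.ascii \"->" #sym " %0\"" : : "i" (val))
#define OFFSET(sym, str, mem)	DEFINE(sym, offsetof(struct str, mem))

struct pt_regs { unsigned long r12, r30, r58, r59; };

int main(void)
{
	OFFSET(PT_r12, pt_regs, r12);	/* becomes "#define PT_r12 <offset>" */
	OFFSET(PT_r58, pt_regs, r58);
	return 0;
}
```

Try `gcc -S demo.c -o - | grep '\->'` to see the markers. Because the offsets
are computed by the compiler, the entry code can never drift out of sync with
struct pt_regs, which is exactly the class of bug the hardcoded `PT_sp + 12`
offsets above allowed.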
@@ -7,7 +7,7 @@
 menuconfig ARC_PLAT_EZNPS
 	bool "\"EZchip\" ARC dev platform"
 	select CPU_BIG_ENDIAN
-	select CLKSRC_NPS
+	select CLKSRC_NPS if !PHYS_ADDR_T_64BIT
 	select EZNPS_GIC
 	select EZCHIP_NPS_MANAGEMENT_ENET if ETHERNET
 	help
......
@@ -85,13 +85,12 @@
 #define PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_WRITE)
 #define PAGE_READONLY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
 #define PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN)
-#define PAGE_EXECONLY		__pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN)
 #define __P000  PAGE_NONE
 #define __P001  PAGE_READONLY
 #define __P010  PAGE_READONLY
 #define __P011  PAGE_READONLY
-#define __P100  PAGE_EXECONLY
+#define __P100  PAGE_READONLY_EXEC
 #define __P101  PAGE_READONLY_EXEC
 #define __P110  PAGE_READONLY_EXEC
 #define __P111  PAGE_READONLY_EXEC
@@ -100,7 +99,7 @@
 #define __S001  PAGE_READONLY
 #define __S010  PAGE_SHARED
 #define __S011  PAGE_SHARED
-#define __S100  PAGE_EXECONLY
+#define __S100  PAGE_READONLY_EXEC
 #define __S101  PAGE_READONLY_EXEC
 #define __S110  PAGE_SHARED_EXEC
 #define __S111  PAGE_SHARED_EXEC
......
@@ -96,12 +96,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 #define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))
 #define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
-/*
- * Execute-only user mappings do not have the PTE_USER bit set. All valid
- * kernel mappings have the PTE_UXN bit set.
- */
 #define pte_valid_not_user(pte) \
-	((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
+	((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
 #define pte_valid_young(pte) \
 	((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
 #define pte_valid_user(pte) \
@@ -117,8 +113,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 /*
  * p??_access_permitted() is true for valid user mappings (subject to the
- * write permission check) other than user execute-only which do not have the
- * PTE_USER bit set. PROT_NONE mappings do not have the PTE_VALID bit set.
+ * write permission check). PROT_NONE mappings do not have the PTE_VALID bit
+ * set.
  */
 #define pte_access_permitted(pte, write) \
 	(pte_valid_user(pte) && (!(write) || pte_write(pte)))
......
@@ -445,7 +445,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 	const struct fault_info *inf;
 	struct mm_struct *mm = current->mm;
 	vm_fault_t fault, major = 0;
-	unsigned long vm_flags = VM_READ | VM_WRITE;
+	unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
 	unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 	if (kprobe_page_fault(regs, esr))
......
@@ -690,10 +690,10 @@ struct kvm_vcpu_arch {
 	bool pvclock_set_guest_stopped_request;
 	struct {
+		u8 preempted;
 		u64 msr_val;
 		u64 last_steal;
-		struct gfn_to_hva_cache stime;
-		struct kvm_steal_time steal;
+		struct gfn_to_pfn_cache cache;
 	} st;
 	u64 tsc_offset;
......
@@ -2648,45 +2648,47 @@ static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
 static void record_steal_time(struct kvm_vcpu *vcpu)
 {
+	struct kvm_host_map map;
+	struct kvm_steal_time *st;
+
 	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
 		return;
 
-	if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
-		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
+	/* -EAGAIN is returned in atomic context so we can just return. */
+	if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT,
+			&map, &vcpu->arch.st.cache, false))
 		return;
 
+	st = map.hva +
+		offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);
+
 	/*
 	 * Doing a TLB flush here, on the guest's behalf, can avoid
 	 * expensive IPIs.
 	 */
 	trace_kvm_pv_tlb_flush(vcpu->vcpu_id,
-		vcpu->arch.st.steal.preempted & KVM_VCPU_FLUSH_TLB);
-	if (xchg(&vcpu->arch.st.steal.preempted, 0) & KVM_VCPU_FLUSH_TLB)
+		st->preempted & KVM_VCPU_FLUSH_TLB);
+	if (xchg(&st->preempted, 0) & KVM_VCPU_FLUSH_TLB)
 		kvm_vcpu_flush_tlb(vcpu, false);
 
-	if (vcpu->arch.st.steal.version & 1)
-		vcpu->arch.st.steal.version += 1;  /* first time write, random junk */
+	vcpu->arch.st.preempted = 0;
 
-	vcpu->arch.st.steal.version += 1;
+	if (st->version & 1)
+		st->version += 1;  /* first time write, random junk */
 
-	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
-		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
+	st->version += 1;
 
 	smp_wmb();
 
-	vcpu->arch.st.steal.steal += current->sched_info.run_delay -
+	st->steal += current->sched_info.run_delay -
 		vcpu->arch.st.last_steal;
 	vcpu->arch.st.last_steal = current->sched_info.run_delay;
 
-	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
-		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
-
 	smp_wmb();
 
-	vcpu->arch.st.steal.version += 1;
+	st->version += 1;
 
-	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
-		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
+	kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, false);
 }
 
 int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
@@ -2853,11 +2855,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		if (data & KVM_STEAL_RESERVED_MASK)
 			return 1;
 
-		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
-						data & KVM_STEAL_VALID_BITS,
-						sizeof(struct kvm_steal_time)))
-			return 1;
-
 		vcpu->arch.st.msr_val = data;
 
 		if (!(data & KVM_MSR_ENABLED))
@@ -3567,15 +3564,25 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
 {
+	struct kvm_host_map map;
+	struct kvm_steal_time *st;
+
 	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
 		return;
 
-	vcpu->arch.st.steal.preempted = KVM_VCPU_PREEMPTED;
+	if (vcpu->arch.st.preempted)
+		return;
+
+	if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, &map,
+			&vcpu->arch.st.cache, true))
+		return;
+
+	st = map.hva +
+		offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);
+
+	st->preempted = vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED;
 
-	kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.st.stime,
-			&vcpu->arch.st.steal.preempted,
-			offsetof(struct kvm_steal_time, preempted),
-			sizeof(vcpu->arch.st.steal.preempted));
+	kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, true);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
@@ -9325,8 +9332,11 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
+	struct gfn_to_pfn_cache *cache = &vcpu->arch.st.cache;
 	int idx;
 
+	kvm_release_pfn(cache->pfn, cache->dirty, cache);
+
 	kvmclock_reset(vcpu);
 
 	kvm_x86_ops->vcpu_free(vcpu);
@@ -9855,11 +9865,18 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
 {
+	struct kvm_vcpu *vcpu;
+	int i;
+
 	/*
 	 * memslots->generation has been incremented.
 	 * mmio generation may have reached its maximum value.
	 */
 	kvm_mmu_invalidate_mmio_sptes(kvm, gen);
+
+	/* Force re-initialization of steal_time cache */
+	kvm_for_each_vcpu(i, vcpu, kvm)
+		kvm_vcpu_kick(vcpu);
 }
 
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
......
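The odd/even st->version handling that record_steal_time() preserves is a
seqcount-style publication protocol: the guest retries its read while an
update is open. A simplified single-file model of it, for illustration only
(the kernel pairs plain writes with smp_wmb() rather than C11 atomics):

```c
#include <stdatomic.h>
#include <stdio.h>

/*
 * Models struct kvm_steal_time's version/steal pair: version is odd while
 * the host is mid-update, even when the record is consistent.
 */
struct steal_model {
	_Atomic unsigned version;
	unsigned long long steal;
};

static void host_update(struct steal_model *st, unsigned long long delta)
{
	if (atomic_load(&st->version) & 1)
		atomic_fetch_add(&st->version, 1); /* first write: clear junk */

	atomic_fetch_add(&st->version, 1);	/* odd: update open */
	st->steal += delta;
	atomic_fetch_add(&st->version, 1);	/* even: update published */
}

static unsigned long long guest_read(struct steal_model *st)
{
	unsigned v;
	unsigned long long val;

	do {
		while ((v = atomic_load(&st->version)) & 1)
			;			/* update in flight, spin */
		val = st->steal;
	} while (atomic_load(&st->version) != v); /* torn read, retry */
	return val;
}

int main(void)
{
	struct steal_model st = { 0 };

	host_update(&st, 42);
	printf("steal = %llu ns\n", guest_read(&st));
	return 0;
}
```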
@@ -978,13 +978,13 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
 	if (wait_startup(chip, 0) != 0) {
 		rc = -ENODEV;
-		goto err_start;
+		goto out_err;
 	}
 
 	/* Take control of the TPM's interrupt hardware and shut it off */
 	rc = tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &intmask);
 	if (rc < 0)
-		goto err_start;
+		goto out_err;
 
 	intmask |= TPM_INTF_CMD_READY_INT | TPM_INTF_LOCALITY_CHANGE_INT |
 		   TPM_INTF_DATA_AVAIL_INT | TPM_INTF_STS_VALID_INT;
@@ -993,21 +993,21 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
 	rc = tpm_chip_start(chip);
 	if (rc)
-		goto err_start;
+		goto out_err;
 
 	rc = tpm2_probe(chip);
+	tpm_chip_stop(chip);
 	if (rc)
-		goto err_probe;
+		goto out_err;
 
 	rc = tpm_tis_read32(priv, TPM_DID_VID(0), &vendor);
 	if (rc < 0)
-		goto err_probe;
+		goto out_err;
 
 	priv->manufacturer_id = vendor;
 
 	rc = tpm_tis_read8(priv, TPM_RID(0), &rid);
 	if (rc < 0)
-		goto err_probe;
+		goto out_err;
 
 	dev_info(dev, "%s TPM (device-id 0x%X, rev-id %d)\n",
 		 (chip->flags & TPM_CHIP_FLAG_TPM2) ? "2.0" : "1.2",
@@ -1016,13 +1016,13 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
 	probe = probe_itpm(chip);
 	if (probe < 0) {
 		rc = -ENODEV;
-		goto err_probe;
+		goto out_err;
 	}
 
 	/* Figure out the capabilities */
 	rc = tpm_tis_read32(priv, TPM_INTF_CAPS(priv->locality), &intfcaps);
 	if (rc < 0)
-		goto err_probe;
+		goto out_err;
 
 	dev_dbg(dev, "TPM interface capabilities (0x%x):\n",
 		intfcaps);
@@ -1056,10 +1056,9 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
 	if (tpm_get_timeouts(chip)) {
 		dev_err(dev, "Could not get TPM timeouts and durations\n");
 		rc = -ENODEV;
-		goto err_probe;
+		goto out_err;
 	}
 
-	chip->flags |= TPM_CHIP_FLAG_IRQ;
 	if (irq) {
 		tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED,
 					 irq);
@@ -1071,18 +1070,15 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
 		}
 	}
 
-	tpm_chip_stop(chip);
-
 	rc = tpm_chip_register(chip);
 	if (rc)
-		goto err_start;
+		goto out_err;
 
-	return 0;
+	if (chip->ops->clk_enable != NULL)
+		chip->ops->clk_enable(chip, false);
 
-err_probe:
-	tpm_chip_stop(chip);
-err_start:
+	return 0;
+
+out_err:
 	if ((chip->ops != NULL) && (chip->ops->clk_enable != NULL))
 		chip->ops->clk_enable(chip, false);
......
@@ -413,10 +413,13 @@ static int axp20x_set_ramp_delay(struct regulator_dev *rdev, int ramp)
 	int i;
 
 	for (i = 0; i < rate_count; i++) {
-		if (ramp <= slew_rates[i])
-			cfg = AXP20X_DCDC2_LDO3_V_RAMP_LDO3_RATE(i);
-		else
+		if (ramp > slew_rates[i])
 			break;
+
+		if (id == AXP20X_DCDC2)
+			cfg = AXP20X_DCDC2_LDO3_V_RAMP_DCDC2_RATE(i);
+		else
+			cfg = AXP20X_DCDC2_LDO3_V_RAMP_LDO3_RATE(i);
 	}
 
 	if (cfg == 0xff) {
@@ -605,7 +608,7 @@ static const struct regulator_desc axp22x_regulators[] = {
 		 AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO1_MASK),
 	AXP_DESC(AXP22X, ELDO2, "eldo2", "eldoin", 700, 3300, 100,
 		 AXP22X_ELDO2_V_OUT, AXP22X_ELDO2_V_OUT_MASK,
-		 AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO1_MASK),
+		 AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO2_MASK),
 	AXP_DESC(AXP22X, ELDO3, "eldo3", "eldoin", 700, 3300, 100,
 		 AXP22X_ELDO3_V_OUT, AXP22X_ELDO3_V_OUT_MASK,
 		 AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO3_MASK),
......
@@ -101,7 +101,6 @@ static const struct regulator_ops bd70528_ldo_ops = {
 	.set_voltage_sel = regulator_set_voltage_sel_regmap,
 	.get_voltage_sel = regulator_get_voltage_sel_regmap,
 	.set_voltage_time_sel = regulator_set_voltage_time_sel,
-	.set_ramp_delay = bd70528_set_ramp_delay,
 };
 static const struct regulator_ops bd70528_led_ops = {
......
@@ -172,20 +172,7 @@ int mc146818_set_time(struct rtc_time *time)
 	save_control = CMOS_READ(RTC_CONTROL);
 	CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
 	save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
-
-#ifdef CONFIG_X86
-	if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
-	    boot_cpu_data.x86 == 0x17) ||
-	    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
-		CMOS_WRITE((save_freq_select & (~RTC_DIV_RESET2)),
-			   RTC_FREQ_SELECT);
-		save_freq_select &= ~RTC_DIV_RESET2;
-	} else
-		CMOS_WRITE((save_freq_select | RTC_DIV_RESET2),
-			   RTC_FREQ_SELECT);
-#else
-	CMOS_WRITE((save_freq_select | RTC_DIV_RESET2), RTC_FREQ_SELECT);
-#endif
+	CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
 
 #ifdef CONFIG_MACH_DECSTATION
 	CMOS_WRITE(real_yrs, RTC_DEC_YEAR);
......
@@ -47,7 +47,7 @@ static irqreturn_t mtk_rtc_irq_handler_thread(int irq, void *data)
 	irqen = irqsta & ~RTC_IRQ_EN_AL;
 	mutex_lock(&rtc->lock);
 	if (regmap_write(rtc->regmap, rtc->addr_base + RTC_IRQ_EN,
-			 irqen) < 0)
+			 irqen) == 0)
 		mtk_rtc_write_trigger(rtc);
 	mutex_unlock(&rtc->lock);
@@ -169,12 +169,12 @@ static int mtk_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
 	alm->pending = !!(pdn2 & RTC_PDN2_PWRON_ALARM);
 	mutex_unlock(&rtc->lock);
 
-	tm->tm_sec = data[RTC_OFFSET_SEC];
-	tm->tm_min = data[RTC_OFFSET_MIN];
-	tm->tm_hour = data[RTC_OFFSET_HOUR];
-	tm->tm_mday = data[RTC_OFFSET_DOM];
-	tm->tm_mon = data[RTC_OFFSET_MTH];
-	tm->tm_year = data[RTC_OFFSET_YEAR];
+	tm->tm_sec = data[RTC_OFFSET_SEC] & RTC_AL_SEC_MASK;
+	tm->tm_min = data[RTC_OFFSET_MIN] & RTC_AL_MIN_MASK;
+	tm->tm_hour = data[RTC_OFFSET_HOUR] & RTC_AL_HOU_MASK;
+	tm->tm_mday = data[RTC_OFFSET_DOM] & RTC_AL_DOM_MASK;
+	tm->tm_mon = data[RTC_OFFSET_MTH] & RTC_AL_MTH_MASK;
+	tm->tm_year = data[RTC_OFFSET_YEAR] & RTC_AL_YEA_MASK;
 
 	tm->tm_year += RTC_MIN_YEAR_OFFSET;
 	tm->tm_mon--;
@@ -195,14 +195,25 @@ static int mtk_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
 	tm->tm_year -= RTC_MIN_YEAR_OFFSET;
 	tm->tm_mon++;
 
-	data[RTC_OFFSET_SEC] = tm->tm_sec;
-	data[RTC_OFFSET_MIN] = tm->tm_min;
-	data[RTC_OFFSET_HOUR] = tm->tm_hour;
-	data[RTC_OFFSET_DOM] = tm->tm_mday;
-	data[RTC_OFFSET_MTH] = tm->tm_mon;
-	data[RTC_OFFSET_YEAR] = tm->tm_year;
-
 	mutex_lock(&rtc->lock);
+	ret = regmap_bulk_read(rtc->regmap, rtc->addr_base + RTC_AL_SEC,
+			       data, RTC_OFFSET_COUNT);
+	if (ret < 0)
+		goto exit;
+
+	data[RTC_OFFSET_SEC] = ((data[RTC_OFFSET_SEC] & ~(RTC_AL_SEC_MASK)) |
+				(tm->tm_sec & RTC_AL_SEC_MASK));
+	data[RTC_OFFSET_MIN] = ((data[RTC_OFFSET_MIN] & ~(RTC_AL_MIN_MASK)) |
+				(tm->tm_min & RTC_AL_MIN_MASK));
+	data[RTC_OFFSET_HOUR] = ((data[RTC_OFFSET_HOUR] & ~(RTC_AL_HOU_MASK)) |
+				(tm->tm_hour & RTC_AL_HOU_MASK));
+	data[RTC_OFFSET_DOM] = ((data[RTC_OFFSET_DOM] & ~(RTC_AL_DOM_MASK)) |
+				(tm->tm_mday & RTC_AL_DOM_MASK));
+	data[RTC_OFFSET_MTH] = ((data[RTC_OFFSET_MTH] & ~(RTC_AL_MTH_MASK)) |
+				(tm->tm_mon & RTC_AL_MTH_MASK));
+	data[RTC_OFFSET_YEAR] = ((data[RTC_OFFSET_YEAR] & ~(RTC_AL_YEA_MASK)) |
+				(tm->tm_year & RTC_AL_YEA_MASK));
 
 	if (alm->enabled) {
 		ret = regmap_bulk_write(rtc->regmap,
 					rtc->addr_base + RTC_AL_SEC,
......
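The pattern behind this RTC fix is worth spelling out: the alarm registers
mix the time fields with other control bits, so the driver now bulk-reads
the registers and merges each new value under its field mask instead of
overwriting whole registers. A tiny standalone demonstration (the register
image and old value are made up):

```c
#include <stdint.h>
#include <stdio.h>

#define RTC_AL_SEC_MASK 0x003f	/* seconds live in the low 6 bits */

/* Read-modify-write merge: keep bits outside the mask untouched. */
static uint16_t merge_alarm_field(uint16_t reg, uint16_t val, uint16_t mask)
{
	return (reg & ~mask) | (val & mask);
}

int main(void)
{
	uint16_t reg = 0x8020;	/* bit 15: unrelated bit; old seconds 0x20 */
	uint16_t merged = merge_alarm_field(reg, 42, RTC_AL_SEC_MASK);

	printf("0x%04x\n", merged);	/* 0x802a: bit 15 survives the update */
	return 0;
}
```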
@@ -379,6 +379,22 @@ static void __init sun50i_h6_rtc_clk_init(struct device_node *node)
 CLK_OF_DECLARE_DRIVER(sun50i_h6_rtc_clk, "allwinner,sun50i-h6-rtc",
 		      sun50i_h6_rtc_clk_init);
 
+/*
+ * The R40 user manual is self-conflicting on whether the prescaler is
+ * fixed or configurable. The clock diagram shows it as fixed, but there
+ * is also a configurable divider in the RTC block.
+ */
+static const struct sun6i_rtc_clk_data sun8i_r40_rtc_data = {
+	.rc_osc_rate = 16000000,
+	.fixed_prescaler = 512,
+};
+
+static void __init sun8i_r40_rtc_clk_init(struct device_node *node)
+{
+	sun6i_rtc_clk_init(node, &sun8i_r40_rtc_data);
+}
+CLK_OF_DECLARE_DRIVER(sun8i_r40_rtc_clk, "allwinner,sun8i-r40-rtc",
+		      sun8i_r40_rtc_clk_init);
+
 static const struct sun6i_rtc_clk_data sun8i_v3_rtc_data = {
 	.rc_osc_rate = 32000,
 	.has_out_clk = 1,
......
@@ -172,9 +172,11 @@ static inline u32 rx_max(struct dw_spi *dws)
 static void dw_writer(struct dw_spi *dws)
 {
-	u32 max = tx_max(dws);
+	u32 max;
 	u16 txw = 0;
 
+	spin_lock(&dws->buf_lock);
+	max = tx_max(dws);
 	while (max--) {
 		/* Set the tx word if the transfer's original "tx" is not null */
 		if (dws->tx_end - dws->len) {
@@ -186,13 +188,16 @@ static void dw_writer(struct dw_spi *dws)
 		dw_write_io_reg(dws, DW_SPI_DR, txw);
 		dws->tx += dws->n_bytes;
 	}
+	spin_unlock(&dws->buf_lock);
 }
 
 static void dw_reader(struct dw_spi *dws)
 {
-	u32 max = rx_max(dws);
+	u32 max;
 	u16 rxw;
 
+	spin_lock(&dws->buf_lock);
+	max = rx_max(dws);
 	while (max--) {
 		rxw = dw_read_io_reg(dws, DW_SPI_DR);
 		/* Care rx only if the transfer's original "rx" is not null */
@@ -204,6 +209,7 @@ static void dw_reader(struct dw_spi *dws)
 		}
 		dws->rx += dws->n_bytes;
 	}
+	spin_unlock(&dws->buf_lock);
 }
 
 static void int_error_stop(struct dw_spi *dws, const char *msg)
@@ -276,18 +282,20 @@ static int dw_spi_transfer_one(struct spi_controller *master,
 {
 	struct dw_spi *dws = spi_controller_get_devdata(master);
 	struct chip_data *chip = spi_get_ctldata(spi);
+	unsigned long flags;
 	u8 imask = 0;
 	u16 txlevel = 0;
 	u32 cr0;
 	int ret;
 
 	dws->dma_mapped = 0;
+	spin_lock_irqsave(&dws->buf_lock, flags);
 	dws->tx = (void *)transfer->tx_buf;
 	dws->tx_end = dws->tx + transfer->len;
 	dws->rx = transfer->rx_buf;
 	dws->rx_end = dws->rx + transfer->len;
 	dws->len = transfer->len;
+	spin_unlock_irqrestore(&dws->buf_lock, flags);
 
 	spi_enable_chip(dws, 0);
@@ -471,6 +479,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
 	dws->type = SSI_MOTO_SPI;
 	dws->dma_inited = 0;
 	dws->dma_addr = (dma_addr_t)(dws->paddr + DW_SPI_DR);
+	spin_lock_init(&dws->buf_lock);
 
 	spi_controller_set_devdata(master, dws);
......
@@ -119,6 +119,7 @@ struct dw_spi {
 	size_t			len;
 	void			*tx;
 	void			*tx_end;
+	spinlock_t		buf_lock;
 	void			*rx;
 	void			*rx_end;
 	int			dma_mapped;
......
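The new buf_lock closes a race between dw_spi_transfer_one() publishing
fresh tx/rx pointers and the interrupt handler draining the FIFO against
stale ones. A toy pthread model of the locking pattern, for illustration
only (a mutex and a thread stand in for the spinlock and the IRQ; all names
here are invented):

```c
#include <pthread.h>
#include <stdio.h>

/* Models dw_spi's shared transfer state: both the "IRQ" and the submitting
 * context touch the buffer pointers, so they take the same lock. */
struct dw_model {
	pthread_mutex_t buf_lock;
	const char *tx, *tx_end;
};

static void *irq_handler(void *arg)
{
	struct dw_model *m = arg;

	pthread_mutex_lock(&m->buf_lock);	/* like dw_writer()/dw_reader() */
	while (m->tx < m->tx_end) {
		putchar(*m->tx);		/* "write the FIFO" */
		m->tx++;
	}
	pthread_mutex_unlock(&m->buf_lock);
	return NULL;
}

int main(void)
{
	struct dw_model m = { PTHREAD_MUTEX_INITIALIZER };
	static const char msg[] = "hi\n";
	pthread_t irq;

	pthread_mutex_lock(&m.buf_lock);	/* like dw_spi_transfer_one() */
	m.tx = msg;
	m.tx_end = msg + sizeof(msg) - 1;
	pthread_mutex_unlock(&m.buf_lock);	/* pointers published atomically */

	pthread_create(&irq, NULL, irq_handler, &m);
	pthread_join(irq, NULL);
	return 0;
}
```

Build with `cc demo.c -pthread`. The point of the pattern is that the IRQ
side can never observe a half-updated tx/tx_end pair.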
@@ -185,6 +185,7 @@ struct fsl_dspi {
 	struct spi_transfer			*cur_transfer;
 	struct spi_message			*cur_msg;
 	struct chip_data			*cur_chip;
+	size_t					progress;
 	size_t					len;
 	const void				*tx;
 	void					*rx;
@@ -586,21 +587,14 @@ static void dspi_tcfq_write(struct fsl_dspi *dspi)
 	dspi->tx_cmd |= SPI_PUSHR_CMD_CTCNT;
 
 	if (dspi->devtype_data->xspi_mode && dspi->bits_per_word > 16) {
-		/* Write two TX FIFO entries first, and then the corresponding
-		 * CMD FIFO entry.
+		/* Write the CMD FIFO entry first, and then the two
+		 * corresponding TX FIFO entries.
 		 */
 		u32 data = dspi_pop_tx(dspi);
 
-		if (dspi->cur_chip->ctar_val & SPI_CTAR_LSBFE) {
-			/* LSB */
-			tx_fifo_write(dspi, data & 0xFFFF);
-			tx_fifo_write(dspi, data >> 16);
-		} else {
-			/* MSB */
-			tx_fifo_write(dspi, data >> 16);
-			tx_fifo_write(dspi, data & 0xFFFF);
-		}
-		cmd_fifo_write(dspi);
+		cmd_fifo_write(dspi);
+		tx_fifo_write(dspi, data & 0xFFFF);
+		tx_fifo_write(dspi, data >> 16);
 	} else {
 		/* Write one entry to both TX FIFO and CMD FIFO
 		 * simultaneously.
@@ -658,7 +652,7 @@ static int dspi_rxtx(struct fsl_dspi *dspi)
 	u32 spi_tcr;
 
 	spi_take_timestamp_post(dspi->ctlr, dspi->cur_transfer,
-				dspi->tx - dspi->bytes_per_word, !dspi->irq);
+				dspi->progress, !dspi->irq);
 
 	/* Get transfer counter (in number of SPI transfers). It was
 	 * reset to 0 when transfer(s) were started.
@@ -667,6 +661,7 @@ static int dspi_rxtx(struct fsl_dspi *dspi)
 	spi_tcnt = SPI_TCR_GET_TCNT(spi_tcr);
 	/* Update total number of bytes that were transferred */
 	msg->actual_length += spi_tcnt * dspi->bytes_per_word;
+	dspi->progress += spi_tcnt;
 
 	trans_mode = dspi->devtype_data->trans_mode;
 	if (trans_mode == DSPI_EOQ_MODE)
@@ -679,7 +674,7 @@ static int dspi_rxtx(struct fsl_dspi *dspi)
 		return 0;
 
 	spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer,
-			       dspi->tx, !dspi->irq);
+			       dspi->progress, !dspi->irq);
 
 	if (trans_mode == DSPI_EOQ_MODE)
 		dspi_eoq_write(dspi);
@@ -768,6 +763,7 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
 		dspi->rx = transfer->rx_buf;
 		dspi->rx_end = dspi->rx + transfer->len;
 		dspi->len = transfer->len;
+		dspi->progress = 0;
 		/* Validated transfer specific frame size (defaults applied) */
 		dspi->bits_per_word = transfer->bits_per_word;
 		if (transfer->bits_per_word <= 8)
@@ -789,7 +785,7 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
 				     SPI_CTARE_DTCP(1));
 
 		spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer,
-				       dspi->tx, !dspi->irq);
+				       dspi->progress, !dspi->irq);
 
 		trans_mode = dspi->devtype_data->trans_mode;
 		switch (trans_mode) {
......
@@ -290,25 +290,32 @@ static void uniphier_spi_recv(struct uniphier_spi_priv *priv)
 	}
 }
 
-static void uniphier_spi_fill_tx_fifo(struct uniphier_spi_priv *priv)
+static void uniphier_spi_set_fifo_threshold(struct uniphier_spi_priv *priv,
+					    unsigned int threshold)
 {
-	unsigned int fifo_threshold, fill_bytes;
 	u32 val;
 
-	fifo_threshold = DIV_ROUND_UP(priv->rx_bytes,
-				      bytes_per_word(priv->bits_per_word));
-	fifo_threshold = min(fifo_threshold, SSI_FIFO_DEPTH);
-
-	fill_bytes = fifo_threshold - (priv->rx_bytes - priv->tx_bytes);
-
-	/* set fifo threshold */
 	val = readl(priv->base + SSI_FC);
 	val &= ~(SSI_FC_TXFTH_MASK | SSI_FC_RXFTH_MASK);
-	val |= FIELD_PREP(SSI_FC_TXFTH_MASK, fifo_threshold);
-	val |= FIELD_PREP(SSI_FC_RXFTH_MASK, fifo_threshold);
+	val |= FIELD_PREP(SSI_FC_TXFTH_MASK, SSI_FIFO_DEPTH - threshold);
+	val |= FIELD_PREP(SSI_FC_RXFTH_MASK, threshold);
 	writel(val, priv->base + SSI_FC);
+}
+
+static void uniphier_spi_fill_tx_fifo(struct uniphier_spi_priv *priv)
+{
+	unsigned int fifo_threshold, fill_words;
+	unsigned int bpw = bytes_per_word(priv->bits_per_word);
+
+	fifo_threshold = DIV_ROUND_UP(priv->rx_bytes, bpw);
+	fifo_threshold = min(fifo_threshold, SSI_FIFO_DEPTH);
+
+	uniphier_spi_set_fifo_threshold(priv, fifo_threshold);
+
+	fill_words = fifo_threshold -
+		DIV_ROUND_UP(priv->rx_bytes - priv->tx_bytes, bpw);
 
-	while (fill_bytes--)
+	while (fill_words--)
 		uniphier_spi_send(priv);
 }
......
@@ -1499,8 +1499,7 @@ static void spi_pump_messages(struct kthread_work *work)
 *            advances its @tx buffer pointer monotonically.
 * @ctlr: Pointer to the spi_controller structure of the driver
 * @xfer: Pointer to the transfer being timestamped
- * @tx: Pointer to the current word within the xfer->tx_buf that the driver is
- *      preparing to transmit right now.
+ * @progress: How many words (not bytes) have been transferred so far
 * @irqs_off: If true, will disable IRQs and preemption for the duration of the
 *	      transfer, for less jitter in time measurement. Only compatible
 *	      with PIO drivers. If true, must follow up with
@@ -1510,21 +1509,19 @@ static void spi_pump_messages(struct kthread_work *work)
 */
void spi_take_timestamp_pre(struct spi_controller *ctlr,
			    struct spi_transfer *xfer,
-			    const void *tx, bool irqs_off)
+			    size_t progress, bool irqs_off)
{
-	u8 bytes_per_word = DIV_ROUND_UP(xfer->bits_per_word, 8);
-
	if (!xfer->ptp_sts)
		return;

	if (xfer->timestamped_pre)
		return;

-	if (tx < (xfer->tx_buf + xfer->ptp_sts_word_pre * bytes_per_word))
+	if (progress < xfer->ptp_sts_word_pre)
		return;

	/* Capture the resolution of the timestamp */
-	xfer->ptp_sts_word_pre = (tx - xfer->tx_buf) / bytes_per_word;
+	xfer->ptp_sts_word_pre = progress;

	xfer->timestamped_pre = true;
@@ -1546,23 +1543,20 @@ EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);
 *            timestamped.
 * @ctlr: Pointer to the spi_controller structure of the driver
 * @xfer: Pointer to the transfer being timestamped
- * @tx: Pointer to the current word within the xfer->tx_buf that the driver has
- *      just transmitted.
+ * @progress: How many words (not bytes) have been transferred so far
 * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
 */
void spi_take_timestamp_post(struct spi_controller *ctlr,
			     struct spi_transfer *xfer,
-			     const void *tx, bool irqs_off)
+			     size_t progress, bool irqs_off)
{
-	u8 bytes_per_word = DIV_ROUND_UP(xfer->bits_per_word, 8);
-
	if (!xfer->ptp_sts)
		return;

	if (xfer->timestamped_post)
		return;

-	if (tx < (xfer->tx_buf + xfer->ptp_sts_word_post * bytes_per_word))
+	if (progress < xfer->ptp_sts_word_post)
		return;

	ptp_read_system_postts(xfer->ptp_sts);
@@ -1573,7 +1567,7 @@ void spi_take_timestamp_post(struct spi_controller *ctlr,
	}

	/* Capture the resolution of the timestamp */
-	xfer->ptp_sts_word_post = (tx - xfer->tx_buf) / bytes_per_word;
+	xfer->ptp_sts_word_post = progress;

	xfer->timestamped_post = true;
}
......
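The spi.c change above replaces pointer arithmetic on xfer->tx_buf with a
plain count of words transferred, which every driver can report regardless
of how it batches its FIFO writes. A sketch of the word-count check, modeled
outside the kernel with a hypothetical stand-in for the xfer fields the
helper uses:

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for the spi_transfer fields used by the helper. */
struct xfer_model {
	size_t ptp_sts_word_pre;	/* word the caller wants inside the snapshot */
	bool timestamped_pre;
};

/* Word-count version of the check that used to be pointer arithmetic. */
static void take_timestamp_pre(struct xfer_model *x, size_t progress)
{
	if (x->timestamped_pre || progress < x->ptp_sts_word_pre)
		return;
	x->ptp_sts_word_pre = progress;	/* record the achieved resolution */
	x->timestamped_pre = true;
	printf("pre-timestamp taken at word %zu\n", progress);
}

int main(void)
{
	struct xfer_model x = { .ptp_sts_word_pre = 8 };
	size_t done;

	/* A PIO driver now reports plain word counts, not buffer pointers. */
	for (done = 0; done <= 16; done += 4)
		take_timestamp_pre(&x, done);
	return 0;
}
```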
@@ -687,6 +687,7 @@ config MAX63XX_WATCHDOG
 config MAX77620_WATCHDOG
 	tristate "Maxim Max77620 Watchdog Timer"
 	depends on MFD_MAX77620 || COMPILE_TEST
+	select WATCHDOG_CORE
 	help
 	  This is the driver for the Max77620 watchdog timer.
 	  Say 'Y' here to enable the watchdog timer support for
@@ -1444,6 +1445,7 @@ config SMSC37B787_WDT
 config TQMX86_WDT
 	tristate "TQ-Systems TQMX86 Watchdog Timer"
 	depends on X86
+	select WATCHDOG_CORE
 	help
 	  This is the driver for the hardware watchdog timer in the TQMX86 IO
 	  controller found on some of their ComExpress Modules.
......
@@ -112,7 +112,7 @@ static int imx7ulp_wdt_restart(struct watchdog_device *wdog,
 {
 	struct imx7ulp_wdt_device *wdt = watchdog_get_drvdata(wdog);
 
-	imx7ulp_wdt_enable(wdt->base, true);
+	imx7ulp_wdt_enable(wdog, true);
 	imx7ulp_wdt_set_timeout(&wdt->wdd, 1);
 
 	/* wait for wdog to fire */
......
@@ -602,7 +602,7 @@ static int orion_wdt_probe(struct platform_device *pdev)
 	set_bit(WDOG_HW_RUNNING, &dev->wdt.status);
 
 	/* Request the IRQ only after the watchdog is disabled */
-	irq = platform_get_irq(pdev, 0);
+	irq = platform_get_irq_optional(pdev, 0);
 	if (irq > 0) {
 		/*
 		 * Not all supported platforms specify an interrupt for the
@@ -617,7 +617,7 @@ static int orion_wdt_probe(struct platform_device *pdev)
 	}
 
 	/* Optional 2nd interrupt for pretimeout */
-	irq = platform_get_irq(pdev, 1);
+	irq = platform_get_irq_optional(pdev, 1);
 	if (irq > 0) {
 		orion_wdt_info.options |= WDIOF_PRETIMEOUT;
 		ret = devm_request_irq(&pdev->dev, irq, orion_wdt_pre_irq,
......
@@ -188,6 +188,7 @@ static struct platform_driver rn5t618_wdt_driver = {
 module_platform_driver(rn5t618_wdt_driver);
 
+MODULE_ALIAS("platform:rn5t618-wdt");
 MODULE_AUTHOR("Beniamino Galvani <b.galvani@gmail.com>");
 MODULE_DESCRIPTION("RN5T618 watchdog driver");
 MODULE_LICENSE("GPL v2");
@@ -420,7 +420,7 @@ static int wdt_find(int addr)
 		cr_wdt_csr = NCT6102D_WDT_CSR;
 		break;
 	case NCT6116_ID:
-		ret = nct6102;
+		ret = nct6116;
 		cr_wdt_timeout = NCT6102D_WDT_TIMEOUT;
 		cr_wdt_control = NCT6102D_WDT_CONTROL;
 		cr_wdt_csr = NCT6102D_WDT_CSR;
......
@@ -720,6 +720,7 @@
 void kvm_set_pfn_dirty(kvm_pfn_t pfn);
 void kvm_set_pfn_accessed(kvm_pfn_t pfn);
 void kvm_get_pfn(kvm_pfn_t pfn);
+void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache);
 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
 			int len);
 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
@@ -770,8 +771,12 @@ struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn
 kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
 kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
+int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
+		struct gfn_to_pfn_cache *cache, bool atomic);
 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
+int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
+		  struct gfn_to_pfn_cache *cache, bool dirty, bool atomic);
 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
......
@@ -18,7 +18,7 @@ struct kvm_memslots;
 enum kvm_mr_change;
 
-#include <asm/types.h>
+#include <linux/types.h>
 
 /*
  * Address types:
@@ -51,4 +51,11 @@ struct gfn_to_hva_cache {
 	struct kvm_memory_slot *memslot;
 };
 
+struct gfn_to_pfn_cache {
+	u64 generation;
+	gfn_t gfn;
+	kvm_pfn_t pfn;
+	bool dirty;
+};
+
 #endif /* __KVM_TYPES_H__ */
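The struct gfn_to_pfn_cache introduced here carries the memslot generation
alongside the cached translation, so any memslot update (which bumps the
generation) invalidates the cache on its next use. A minimal model of that
lookup discipline, with a fake translation function standing in for
gfn_to_pfn_memslot():

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;
typedef uint64_t kvm_pfn_t;

/* Mirrors the new cache: one gfn->pfn translation plus the memslot
 * generation it was made under. */
struct pfn_cache_model {
	uint64_t generation;
	gfn_t gfn;
	kvm_pfn_t pfn;
	bool dirty;
};

/* Refresh whenever the gfn or the memslot generation has changed. */
static kvm_pfn_t cached_translate(struct pfn_cache_model *c, gfn_t gfn,
				  uint64_t slots_generation,
				  kvm_pfn_t (*translate)(gfn_t))
{
	if (!c->pfn || c->gfn != gfn || c->generation != slots_generation) {
		c->pfn = translate(gfn);
		c->gfn = gfn;
		c->dirty = false;
		c->generation = slots_generation;
	}
	return c->pfn;
}

static kvm_pfn_t fake_translate(gfn_t gfn) { return gfn + 1000; }

int main(void)
{
	struct pfn_cache_model c = { 0 };

	printf("pfn=%llu\n", (unsigned long long)
	       cached_translate(&c, 5, 1, fake_translate));
	/* A memslot update bumps the generation and forces a refresh. */
	printf("pfn=%llu\n", (unsigned long long)
	       cached_translate(&c, 5, 2, fake_translate));
	return 0;
}
```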
@@ -46,6 +46,14 @@
 #define RTC_AL_SEC		0x0018
 
+#define RTC_AL_SEC_MASK		0x003f
+#define RTC_AL_MIN_MASK		0x003f
+#define RTC_AL_HOU_MASK		0x001f
+#define RTC_AL_DOM_MASK		0x001f
+#define RTC_AL_DOW_MASK		0x0007
+#define RTC_AL_MTH_MASK		0x000f
+#define RTC_AL_YEA_MASK		0x007f
+
 #define RTC_PDN2		0x002e
 #define RTC_PDN2_PWRON_ALARM	BIT(4)
......
@@ -689,10 +689,10 @@ extern void spi_finalize_current_transfer(struct spi_controller *ctlr);
 /* Helper calls for driver to timestamp transfer */
 void spi_take_timestamp_pre(struct spi_controller *ctlr,
 			    struct spi_transfer *xfer,
-			    const void *tx, bool irqs_off);
+			    size_t progress, bool irqs_off);
 void spi_take_timestamp_post(struct spi_controller *ctlr,
 			     struct spi_transfer *xfer,
-			     const void *tx, bool irqs_off);
+			     size_t progress, bool irqs_off);
 
 /* the spi driver core manages memory for the spi_controller classdev */
 extern struct spi_controller *__spi_alloc_controller(struct device *host,
......
@@ -18,13 +18,13 @@ DECLARE_EVENT_CLASS(preemptirq_template,
 	TP_ARGS(ip, parent_ip),
 
 	TP_STRUCT__entry(
-		__field(u32, caller_offs)
-		__field(u32, parent_offs)
+		__field(s32, caller_offs)
+		__field(s32, parent_offs)
 	),
 
 	TP_fast_assign(
-		__entry->caller_offs = (u32)(ip - (unsigned long)_stext);
-		__entry->parent_offs = (u32)(parent_ip - (unsigned long)_stext);
+		__entry->caller_offs = (s32)(ip - (unsigned long)_stext);
+		__entry->parent_offs = (s32)(parent_ip - (unsigned long)_stext);
 	),
 
 	TP_printk("caller=%pS parent=%pS",
......
@@ -96,6 +96,20 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func,
 	return 0;
 }
 
+/*
+ * Not all archs define MCOUNT_INSN_SIZE which is used to look for direct
+ * functions. But those archs currently don't support direct functions
+ * anyway, and ftrace_find_rec_direct() is just a stub for them.
+ * Define MCOUNT_INSN_SIZE to keep those archs compiling.
+ */
+#ifndef MCOUNT_INSN_SIZE
+/* Make sure this only works without direct calls */
+# ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+#  error MCOUNT_INSN_SIZE not defined with direct calls enabled
+# endif
+# define MCOUNT_INSN_SIZE 0
+#endif
+
 int function_graph_enter(unsigned long ret, unsigned long func,
 			 unsigned long frame_pointer, unsigned long *retp)
 {
......
@@ -526,8 +526,7 @@ static int function_stat_show(struct seq_file *m, void *v)
 	}
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	avg = rec->time;
-	do_div(avg, rec->counter);
+	avg = div64_ul(rec->time, rec->counter);
 	if (tracing_thresh && (avg < tracing_thresh))
 		goto out;
 #endif
@@ -553,7 +552,8 @@ static int function_stat_show(struct seq_file *m, void *v)
 		 * Divide only 1000 for ns^2 -> us^2 conversion.
 		 * trace_print_graph_duration will divide 1000 again.
 		 */
-		do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
+		stddev = div64_ul(stddev,
+				  rec->counter * (rec->counter - 1) * 1000);
 	}
 
 	trace_seq_init(&s);
@@ -195,7 +195,7 @@ static int parse_entry(char *str, struct trace_event_call *call, void **pentry)
 	unsigned long irq_flags;
 	void *entry = NULL;
 	int entry_size;
-	u64 val;
+	u64 val = 0;
 	int len;
 
 	entry = trace_alloc_entry(call, &entry_size);
......
@@ -630,7 +630,7 @@ static void start_wakeup_tracer(struct trace_array *tr)
 	if (ret) {
 		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_migrate_task\n");
-		return;
+		goto fail_deprobe_sched_switch;
 	}
 
 	wakeup_reset(tr);
@@ -648,6 +648,8 @@ static void start_wakeup_tracer(struct trace_array *tr)
 	printk(KERN_ERR "failed to start wakeup tracer\n");
 
 	return;
+fail_deprobe_sched_switch:
+	unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
 fail_deprobe_wake_new:
 	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
 fail_deprobe:
......
@@ -283,6 +283,11 @@ static void check_stack(unsigned long ip, unsigned long *stack)
 	local_irq_restore(flags);
 }
 
+/* Some archs may not define MCOUNT_INSN_SIZE */
+#ifndef MCOUNT_INSN_SIZE
+# define MCOUNT_INSN_SIZE 0
+#endif
+
 static void
 stack_trace_call(unsigned long ip, unsigned long parent_ip,
 		 struct ftrace_ops *op, struct pt_regs *pt_regs)
......
@@ -90,12 +90,6 @@ static void unmap_region(struct mm_struct *mm,
 * MAP_PRIVATE	r: (no) no	r: (yes) yes	r: (no) yes	r: (no) yes
 *		w: (no) no	w: (no) no	w: (copy) copy	w: (no) no
 *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
- *
- * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and
- * MAP_PRIVATE:
- *								r: (no) no
- *								w: (no) no
- *								x: (yes) yes
 */
pgprot_t protection_map[16] __ro_after_init = {
	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
......
@@ -1817,26 +1817,72 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(gfn_to_page);
 
-static int __kvm_map_gfn(struct kvm_memory_slot *slot, gfn_t gfn,
-			 struct kvm_host_map *map)
+void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache)
+{
+	if (pfn == 0)
+		return;
+
+	if (cache)
+		cache->pfn = cache->gfn = 0;
+
+	if (dirty)
+		kvm_release_pfn_dirty(pfn);
+	else
+		kvm_release_pfn_clean(pfn);
+}
+
+static void kvm_cache_gfn_to_pfn(struct kvm_memory_slot *slot, gfn_t gfn,
+				 struct gfn_to_pfn_cache *cache, u64 gen)
+{
+	kvm_release_pfn(cache->pfn, cache->dirty, cache);
+
+	cache->pfn = gfn_to_pfn_memslot(slot, gfn);
+	cache->gfn = gfn;
+	cache->dirty = false;
+	cache->generation = gen;
+}
+
+static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
+			 struct kvm_host_map *map,
+			 struct gfn_to_pfn_cache *cache,
+			 bool atomic)
 {
 	kvm_pfn_t pfn;
 	void *hva = NULL;
 	struct page *page = KVM_UNMAPPED_PAGE;
+	struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn);
+	u64 gen = slots->generation;
 
 	if (!map)
 		return -EINVAL;
 
-	pfn = gfn_to_pfn_memslot(slot, gfn);
+	if (cache) {
+		if (!cache->pfn || cache->gfn != gfn ||
+			cache->generation != gen) {
+			if (atomic)
+				return -EAGAIN;
+			kvm_cache_gfn_to_pfn(slot, gfn, cache, gen);
+		}
+		pfn = cache->pfn;
+	} else {
+		if (atomic)
+			return -EAGAIN;
+		pfn = gfn_to_pfn_memslot(slot, gfn);
+	}
 	if (is_error_noslot_pfn(pfn))
 		return -EINVAL;
 
 	if (pfn_valid(pfn)) {
 		page = pfn_to_page(pfn);
-		hva = kmap(page);
+		if (atomic)
+			hva = kmap_atomic(page);
+		else
+			hva = kmap(page);
 #ifdef CONFIG_HAS_IOMEM
-	} else {
+	} else if (!atomic) {
 		hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
+	} else {
+		return -EINVAL;
 #endif
 	}
 
@@ -1851,14 +1897,25 @@ static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
 	return 0;
 }
 
+int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
+		struct gfn_to_pfn_cache *cache, bool atomic)
+{
+	return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map,
+			cache, atomic);
+}
+EXPORT_SYMBOL_GPL(kvm_map_gfn);
+
 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
 {
-	return __kvm_map_gfn(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, map);
+	return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map,
+		NULL, false);
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_map);
 
-void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
-		    bool dirty)
+static void __kvm_unmap_gfn(struct kvm_memory_slot *memslot,
+			struct kvm_host_map *map,
+			struct gfn_to_pfn_cache *cache,
+			bool dirty, bool atomic)
 {
 	if (!map)
 		return;
@@ -1866,23 +1923,45 @@ void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
 	if (!map->hva)
 		return;
 
-	if (map->page != KVM_UNMAPPED_PAGE)
-		kunmap(map->page);
+	if (map->page != KVM_UNMAPPED_PAGE) {
+		if (atomic)
+			kunmap_atomic(map->hva);
+		else
+			kunmap(map->page);
+	}
 #ifdef CONFIG_HAS_IOMEM
-	else
+	else if (!atomic)
 		memunmap(map->hva);
+	else
+		WARN_ONCE(1, "Unexpected unmapping in atomic context");
 #endif
 
-	if (dirty) {
-		kvm_vcpu_mark_page_dirty(vcpu, map->gfn);
-		kvm_release_pfn_dirty(map->pfn);
-	} else {
-		kvm_release_pfn_clean(map->pfn);
-	}
+	if (dirty)
+		mark_page_dirty_in_slot(memslot, map->gfn);
+
+	if (cache)
+		cache->dirty |= dirty;
+	else
+		kvm_release_pfn(map->pfn, dirty, NULL);
 
 	map->hva = NULL;
 	map->page = NULL;
 }
+
+int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
+		  struct gfn_to_pfn_cache *cache, bool dirty, bool atomic)
+{
+	__kvm_unmap_gfn(gfn_to_memslot(vcpu->kvm, map->gfn), map, cache,
+			dirty, atomic);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_unmap_gfn);
+
+void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
+{
+	__kvm_unmap_gfn(kvm_vcpu_gfn_to_memslot(vcpu, map->gfn), map, NULL,
+			dirty, false);
+}
 EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
 
 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)
......