/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ASM_ARM_KVM_ARCH_TIMER_H
#define __ASM_ARM_KVM_ARCH_TIMER_H

#include <linux/clocksource.h>
#include <linux/hrtimer.h>

enum kvm_arch_timers {
	TIMER_PTIMER,
	TIMER_VTIMER,
	NR_KVM_EL0_TIMERS,
	TIMER_HVTIMER = NR_KVM_EL0_TIMERS,
	TIMER_HPTIMER,
	NR_KVM_TIMERS
};

enum kvm_arch_timer_regs {
	TIMER_REG_CNT,
	TIMER_REG_CVAL,
	TIMER_REG_TVAL,
	TIMER_REG_CTL,
	TIMER_REG_VOFF,
};

struct arch_timer_offset {
	/*
	 * If set, pointer to one of the offsets in the kvm's offset
	 * structure. If NULL, assume a zero offset.
	 */
	u64	*vm_offset;
	/*
	 * If set, pointer to one of the offsets in the vcpu's sysreg
	 * array. If NULL, assume a zero offset.
	 */
	u64	*vcpu_offset;
};
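
/*
 * Illustrative sketch only, not part of the upstream interface: the two
 * optional offsets are additive, so a caller resolving them might use a
 * helper along the lines of the hypothetical one below. The real lookup
 * lives in the timer emulation code, not in this header.
 */
static inline u64 example_timer_total_offset(const struct arch_timer_offset *off)
{
	u64 total = 0;

	/* A NULL pointer simply means "no contribution from that level" */
	if (off->vm_offset)
		total += *off->vm_offset;
	if (off->vcpu_offset)
		total += *off->vcpu_offset;

	return total;
}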

struct arch_timer_vm_data {
	/* Offset applied to the virtual timer/counter */
	u64	voffset;
	/* Offset applied to the physical timer/counter */
	u64	poffset;

	/* The PPI for each timer, global to the VM */
	u8	ppi[NR_KVM_TIMERS];
};

struct arch_timer_context {
	struct kvm_vcpu			*vcpu;

	/* Emulated Timer (may be unused) */
	struct hrtimer			hrtimer;
	u64				ns_frac;

	/* Offset for this counter/timer */
	struct arch_timer_offset	offset;
	/*
	 * We have multiple paths which can save/restore the timer state onto
	 * the hardware, so we need some way of keeping track of where the
	 * latest state is.
	 */
	bool				loaded;

	/* Output level of the timer IRQ */
	struct {
		bool			level;
	} irq;

	/* Duplicated state from arch_timer.c for convenience */
	u32				host_timer_irq;
};

struct timer_map {
	struct arch_timer_context *direct_vtimer;
	struct arch_timer_context *direct_ptimer;
	struct arch_timer_context *emul_vtimer;
	struct arch_timer_context *emul_ptimer;
};

void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map);
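
/*
 * Usage sketch (illustrative only): a caller fills a timer_map on the
 * stack to find out which timers are backed directly by hardware and
 * which ones are emulated with an hrtimer for this vcpu; a non-NULL
 * emul_* slot means that timer is software-emulated.
 *
 *	struct timer_map map;
 *
 *	get_timer_map(vcpu, &map);
 *	if (map.emul_ptimer)
 *		... arm the hrtimer path for the physical timer ...
 */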

struct arch_timer_cpu {
	struct arch_timer_context timers[NR_KVM_TIMERS];

	/* Background timer used when the guest is not running */
	struct hrtimer			bg_timer;

	/* Is the timer enabled? */
	bool			enabled;
};

int __init kvm_timer_hyp_init(bool has_gic);
int kvm_timer_enable(struct kvm_vcpu *vcpu);
void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_timer_sync_user(struct kvm_vcpu *vcpu);
bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu);
void kvm_timer_update_run(struct kvm_vcpu *vcpu);
void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu);

void kvm_timer_init_vm(struct kvm *kvm);

u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid);
int kvm_arm_timer_set_reg(struct kvm_vcpu *, u64 regid, u64 value);

int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);

u64 kvm_phys_timer_read(void);

void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu);
void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu);

void kvm_timer_init_vhe(void);

#define vcpu_timer(v)	(&(v)->arch.timer_cpu)
#define vcpu_get_timer(v,t)	(&vcpu_timer(v)->timers[(t)])
#define vcpu_vtimer(v)	(&(v)->arch.timer_cpu.timers[TIMER_VTIMER])
#define vcpu_ptimer(v)	(&(v)->arch.timer_cpu.timers[TIMER_PTIMER])
#define vcpu_hvtimer(v)	(&(v)->arch.timer_cpu.timers[TIMER_HVTIMER])
#define vcpu_hptimer(v)	(&(v)->arch.timer_cpu.timers[TIMER_HPTIMER])

#define arch_timer_ctx_index(ctx)	((ctx) - vcpu_timer((ctx)->vcpu)->timers)

#define timer_vm_data(ctx)		(&(ctx)->vcpu->kvm->arch.timer_data)
#define timer_irq(ctx)			(timer_vm_data(ctx)->ppi[arch_timer_ctx_index(ctx)])
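
/*
 * Example (illustrative): combining the accessors above, the PPI wired to
 * a vcpu's virtual timer can be looked up as
 *
 *	u8 vppi = timer_irq(vcpu_vtimer(vcpu));
 */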

u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu,
			      enum kvm_arch_timers tmr,
			      enum kvm_arch_timer_regs treg);
void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu,
				enum kvm_arch_timers tmr,
				enum kvm_arch_timer_regs treg,
				u64 val);
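
/*
 * Illustrative call (not mandated by this API): reading the virtual
 * timer's compare value through the accessor pair above.
 *
 *	u64 cval = kvm_arm_timer_read_sysreg(vcpu, TIMER_VTIMER,
 *					     TIMER_REG_CVAL);
 */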

/* Needed for tracing */
u32 timer_get_ctl(struct arch_timer_context *ctxt);
u64 timer_get_cval(struct arch_timer_context *ctxt);

/* CPU HP callbacks */
void kvm_timer_cpu_up(void);
void kvm_timer_cpu_down(void);

/*
 * With FEAT_ECV's CNTPOFF_EL2 (and VHE), the hardware can apply the
 * physical counter offset itself, so KVM does not need to trap physical
 * counter/timer accesses to honour a non-zero poffset.
 */
static inline bool has_cntpoff(void)
{
	return (has_vhe() && cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF));
}

#endif