Commit 9957c86d authored by Paul Mackerras, committed by Paolo Bonzini

KVM: Move all accesses to kvm::irq_routing into irqchip.c

Now that struct _irqfd does not keep a reference to storage pointed
to by the irq_routing field of struct kvm, we can move the statement
that updates it out from under irqfds.lock and put it in
kvm_set_irq_routing() instead.  That means we then have to take a
srcu_read_lock on kvm->irq_srcu around the irqfd_update call in
kvm_irqfd_assign(), since holding kvm->irqfds.lock no longer
ensures that the routing can't change.

Combined with changing kvm_irq_map_gsi() and kvm_irq_map_chip_pin()
to take a struct kvm * argument instead of a pointer to the routing
table, this allows us to move all references to kvm->irq_routing
into irqchip.c.  That in turn allows us to move the definition of
the kvm_irq_routing_table struct into irqchip.c as well.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Tested-by: Eric Auger <eric.auger@linaro.org>
Tested-by: Cornelia Huck <cornelia.huck@de.ibm.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 8ba918d4
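
To make the new access rule concrete, here is a minimal read-side sketch
(illustrative only; resolve_gsi_example is a hypothetical caller, not code
from this commit). Readers bracket routing lookups with kvm->irq_srcu, and
kvm_irq_map_gsi() now performs the srcu_dereference() of kvm->irq_routing
internally:

/*
 * Hypothetical reader following the pattern this commit establishes.
 * The SRCU read-side critical section pins the current routing table;
 * kvm_irq_map_gsi() copies matching entries into 'entries' (which must
 * hold KVM_NR_IRQCHIPS elements), so the copies stay valid after the
 * unlock even if the table is replaced and freed.
 */
static int resolve_gsi_example(struct kvm *kvm, int gsi,
                               struct kvm_kernel_irq_routing_entry *entries)
{
        int idx, n;

        idx = srcu_read_lock(&kvm->irq_srcu);
        n = kvm_irq_map_gsi(kvm, entries, gsi);
        srcu_read_unlock(&kvm->irq_srcu, idx);

        return n;       /* number of entries copied out */
}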

include/linux/kvm_host.h
@@ -325,24 +325,7 @@ struct kvm_kernel_irq_routing_entry {
         struct hlist_node link;
 };
 
-#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
-struct kvm_irq_routing_table {
-        int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
-        struct kvm_kernel_irq_routing_entry *rt_entries;
-        u32 nr_rt_entries;
-        /*
-         * Array indexed by gsi. Each entry contains list of irq chips
-         * the gsi is connected to.
-         */
-        struct hlist_head map[0];
-};
-#else
-struct kvm_irq_routing_table {};
-#endif
+struct kvm_irq_routing_table;
 
 #ifndef KVM_PRIVATE_MEM_SLOTS
 #define KVM_PRIVATE_MEM_SLOTS 0

@@ -401,8 +384,7 @@ struct kvm {
         struct mutex irq_lock;
 #ifdef CONFIG_HAVE_KVM_IRQCHIP
         /*
-         * Update side is protected by irq_lock and,
-         * if configured, irqfds.lock.
+         * Update side is protected by irq_lock.
          */
         struct kvm_irq_routing_table __rcu *irq_routing;
         struct hlist_head mask_notifier_list;

@@ -752,10 +734,9 @@ void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
 void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
                              bool mask);
-int kvm_irq_map_gsi(struct kvm_kernel_irq_routing_entry *entries,
-                    struct kvm_irq_routing_table *irq_rt, int gsi);
-int kvm_irq_map_chip_pin(struct kvm_irq_routing_table *irq_rt,
-                         unsigned irqchip, unsigned pin);
+int kvm_irq_map_gsi(struct kvm *kvm,
+                    struct kvm_kernel_irq_routing_entry *entries, int gsi);
+int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin);
 int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
                 bool line_status);

@@ -967,7 +948,7 @@ int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
 #ifdef CONFIG_HAVE_KVM_IRQCHIP
 int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
 void kvm_irqfd_release(struct kvm *kvm);
-void kvm_irq_routing_update(struct kvm *, struct kvm_irq_routing_table *);
+void kvm_irq_routing_update(struct kvm *);
 #else
 static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
 {

@@ -989,10 +970,8 @@ static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
 static inline void kvm_irqfd_release(struct kvm *kvm) {}
 
 #ifdef CONFIG_HAVE_KVM_IRQCHIP
-static inline void kvm_irq_routing_update(struct kvm *kvm,
-                                          struct kvm_irq_routing_table *irq_rt)
+static inline void kvm_irq_routing_update(struct kvm *kvm)
 {
-        rcu_assign_pointer(kvm->irq_routing, irq_rt);
 }
 #endif

virt/kvm/eventfd.c
@@ -278,14 +278,13 @@ irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
 }
 
 /* Must be called under irqfds.lock */
-static void irqfd_update(struct kvm *kvm, struct _irqfd *irqfd,
-                         struct kvm_irq_routing_table *irq_rt)
+static void irqfd_update(struct kvm *kvm, struct _irqfd *irqfd)
 {
         struct kvm_kernel_irq_routing_entry *e;
         struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
         int i, n_entries;
 
-        n_entries = kvm_irq_map_gsi(entries, irq_rt, irqfd->gsi);
+        n_entries = kvm_irq_map_gsi(kvm, entries, irqfd->gsi);
 
         write_seqcount_begin(&irqfd->irq_entry_sc);

@@ -304,12 +303,12 @@ static void irqfd_update(struct kvm *kvm, struct _irqfd *irqfd,
 static int
 kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
 {
-        struct kvm_irq_routing_table *irq_rt;
         struct _irqfd *irqfd, *tmp;
         struct fd f;
         struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
         int ret;
         unsigned int events;
+        int idx;
 
         irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
         if (!irqfd)

@@ -403,9 +402,9 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
                 goto fail;
         }
 
-        irq_rt = rcu_dereference_protected(kvm->irq_routing,
-                                           lockdep_is_held(&kvm->irqfds.lock));
-        irqfd_update(kvm, irqfd, irq_rt);
+        idx = srcu_read_lock(&kvm->irq_srcu);
+        irqfd_update(kvm, irqfd);
+        srcu_read_unlock(&kvm->irq_srcu, idx);
 
         list_add_tail(&irqfd->list, &kvm->irqfds.items);

@@ -539,20 +538,17 @@ kvm_irqfd_release(struct kvm *kvm)
 }
 
 /*
- * Change irq_routing and irqfd.
+ * Take note of a change in irq routing.
  * Caller must invoke synchronize_srcu(&kvm->irq_srcu) afterwards.
  */
-void kvm_irq_routing_update(struct kvm *kvm,
-                            struct kvm_irq_routing_table *irq_rt)
+void kvm_irq_routing_update(struct kvm *kvm)
 {
         struct _irqfd *irqfd;
 
         spin_lock_irq(&kvm->irqfds.lock);
 
-        rcu_assign_pointer(kvm->irq_routing, irq_rt);
-
         list_for_each_entry(irqfd, &kvm->irqfds.items, list)
-                irqfd_update(kvm, irqfd, irq_rt);
+                irqfd_update(kvm, irqfd);
 
         spin_unlock_irq(&kvm->irqfds.lock);
 }

virt/kvm/irq_comm.c
@@ -163,7 +163,6 @@ int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level)
         struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
         struct kvm_kernel_irq_routing_entry *e;
         int ret = -EINVAL;
-        struct kvm_irq_routing_table *irq_rt;
         int idx;
 
         trace_kvm_set_irq(irq, level, irq_source_id);
@@ -177,8 +176,7 @@ int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level)
          * which is limited to 1:1 GSI mapping.
          */
         idx = srcu_read_lock(&kvm->irq_srcu);
-        irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
-        if (kvm_irq_map_gsi(entries, irq_rt, irq) > 0) {
+        if (kvm_irq_map_gsi(kvm, entries, irq) > 0) {
                 e = &entries[0];
                 if (likely(e->type == KVM_IRQ_ROUTING_MSI))
                         ret = kvm_set_msi_inatomic(e, kvm);

@@ -264,7 +262,7 @@ void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
         int idx, gsi;
 
         idx = srcu_read_lock(&kvm->irq_srcu);
-        gsi = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu)->chip[irqchip][pin];
+        gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
         if (gsi != -1)
                 hlist_for_each_entry_rcu(kimn, &kvm->mask_notifier_list, link)
                         if (kimn->irq == gsi)

virt/kvm/irqchip.c
@@ -31,12 +31,26 @@
 #include <trace/events/kvm.h>
 #include "irq.h"
 
-int kvm_irq_map_gsi(struct kvm_kernel_irq_routing_entry *entries,
-                    struct kvm_irq_routing_table *irq_rt, int gsi)
+struct kvm_irq_routing_table {
+        int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
+        struct kvm_kernel_irq_routing_entry *rt_entries;
+        u32 nr_rt_entries;
+        /*
+         * Array indexed by gsi. Each entry contains list of irq chips
+         * the gsi is connected to.
+         */
+        struct hlist_head map[0];
+};
+
+int kvm_irq_map_gsi(struct kvm *kvm,
+                    struct kvm_kernel_irq_routing_entry *entries, int gsi)
 {
+        struct kvm_irq_routing_table *irq_rt;
         struct kvm_kernel_irq_routing_entry *e;
         int n = 0;
 
+        irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
+                                        lockdep_is_held(&kvm->irq_lock));
         if (gsi < irq_rt->nr_rt_entries) {
                 hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
                         entries[n] = *e;

@@ -47,21 +61,21 @@ int kvm_irq_map_gsi(struct kvm_kernel_irq_routing_entry *entries,
         return n;
 }
 
-int kvm_irq_map_chip_pin(struct kvm_irq_routing_table *irq_rt,
-                         unsigned irqchip, unsigned pin)
+int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
 {
+        struct kvm_irq_routing_table *irq_rt;
+
+        irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
         return irq_rt->chip[irqchip][pin];
 }
 
 bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
 {
-        struct kvm_irq_routing_table *irq_rt;
         struct kvm_irq_ack_notifier *kian;
         int gsi, idx;
 
         idx = srcu_read_lock(&kvm->irq_srcu);
-        irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
-        gsi = kvm_irq_map_chip_pin(irq_rt, irqchip, pin);
+        gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
         if (gsi != -1)
                 hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
                                          link)

@@ -78,15 +92,13 @@ EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);
 
 void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
 {
-        struct kvm_irq_routing_table *irq_rt;
         struct kvm_irq_ack_notifier *kian;
         int gsi, idx;
 
         trace_kvm_ack_irq(irqchip, pin);
 
         idx = srcu_read_lock(&kvm->irq_srcu);
-        irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
-        gsi = kvm_irq_map_chip_pin(irq_rt, irqchip, pin);
+        gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
         if (gsi != -1)
                 hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
                                          link)

@@ -143,7 +155,6 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
 {
         struct kvm_kernel_irq_routing_entry irq_set[KVM_NR_IRQCHIPS];
         int ret = -1, i, idx;
-        struct kvm_irq_routing_table *irq_rt;
 
         trace_kvm_set_irq(irq, level, irq_source_id);
@@ -152,8 +163,7 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
          * writes to the unused one.
          */
         idx = srcu_read_lock(&kvm->irq_srcu);
-        irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
-        i = kvm_irq_map_gsi(irq_set, irq_rt, irq);
+        i = kvm_irq_map_gsi(kvm, irq_set, irq);
         srcu_read_unlock(&kvm->irq_srcu, idx);
 
         while(i--) {

@@ -250,7 +260,8 @@ int kvm_set_irq_routing(struct kvm *kvm,
 
         mutex_lock(&kvm->irq_lock);
         old = kvm->irq_routing;
-        kvm_irq_routing_update(kvm, new);
+        rcu_assign_pointer(kvm->irq_routing, new);
+        kvm_irq_routing_update(kvm);
         mutex_unlock(&kvm->irq_lock);
         synchronize_srcu_expedited(&kvm->irq_srcu);
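
For reference, the update side now follows the ordering below (a simplified
sketch of kvm_set_irq_routing() as modified above; building and validating
the new table, and the error paths, are omitted):

        /* Sketch only: publish a new routing table, then retire the old one. */
        mutex_lock(&kvm->irq_lock);
        old = kvm->irq_routing;
        rcu_assign_pointer(kvm->irq_routing, new);   /* publish the new table */
        kvm_irq_routing_update(kvm);   /* re-resolve each irqfd's cached route */
        mutex_unlock(&kvm->irq_lock);

        /* Wait until no irq_srcu reader can still see 'old', then free it. */
        synchronize_srcu_expedited(&kvm->irq_srcu);
        kfree(old);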