Commit 0e1dc427 authored by Linus Torvalds

Merge tag 'for-linus-4.1b-rc2-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen bug fixes from David Vrabel:

 - fix blkback regression if using persistent grants

 - fix various event channel related suspend/resume bugs

 - fix AMD x86 regression with X86_BUG_SYSRET_SS_ATTRS

 - SWIOTLB on ARM now uses frames <4 GiB (if available) so devices
   only capable of 32-bit DMA work.

* tag 'for-linus-4.1b-rc2-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen: Add __GFP_DMA flag when xen_swiotlb_init gets free pages on ARM
  hypervisor/x86/xen: Unset X86_BUG_SYSRET_SS_ATTRS on Xen PV guests
  xen/events: Set irq_info->evtchn before binding the channel to CPU in __startup_pirq()
  xen/console: Update console event channel on resume
  xen/xenbus: Update xenbus event channel on resume
  xen/events: Clear cpu_evtchn_mask before resuming
  xen-pciback: Add name prefix to global 'permissive' variable
  xen: Suspend ticks on all CPUs during suspend
  xen/grant: introduce func gnttab_unmap_refs_sync()
  xen/blkback: safely unmap purge persistent grants
parents 3d54ac9e 8746515d
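
Several of the hunks below (blkback and gntdev) converge on the new gnttab_unmap_refs_sync() helper added in drivers/xen/grant-table.c, which hides the completion-based wait behind a single call. A minimal caller-side sketch of that pattern follows, modeled on the blkback hunks; the wrapper function and its name are illustrative only, not part of this merge:

#include <xen/grant_table.h>

/* Illustrative sketch (not part of this merge): the caller-side
 * pattern the blkback hunks adopt. 'unmap', 'pages' and 'count'
 * stand in for a driver's own batch state. */
static void unmap_batch_sync(struct gnttab_unmap_grant_ref *unmap,
                             struct page **pages, unsigned int count)
{
        struct gntab_unmap_queue_data unmap_data;

        unmap_data.unmap_ops = unmap;   /* one unmap op per granted page */
        unmap_data.kunmap_ops = NULL;   /* no kernel-mapping undo needed */
        unmap_data.pages = pages;       /* pages backing the grants */
        unmap_data.count = count;

        /* Queues the async unmap and sleeps until its callback fires;
         * returns the batch status, which these callers treat as fatal. */
        BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
}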
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
@@ -110,5 +110,6 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 bool xen_arch_need_swiotlb(struct device *dev,
                            unsigned long pfn,
                            unsigned long mfn);
+unsigned long xen_get_swiotlb_free_pages(unsigned int order);
 
 #endif /* _ASM_ARM_XEN_PAGE_H */
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
@@ -4,6 +4,7 @@
 #include <linux/gfp.h>
 #include <linux/highmem.h>
 #include <linux/export.h>
+#include <linux/memblock.h>
 #include <linux/of_address.h>
 #include <linux/slab.h>
 #include <linux/types.h>
@@ -21,6 +22,20 @@
 #include <asm/xen/hypercall.h>
 #include <asm/xen/interface.h>
 
+unsigned long xen_get_swiotlb_free_pages(unsigned int order)
+{
+        struct memblock_region *reg;
+        gfp_t flags = __GFP_NOWARN;
+
+        for_each_memblock(memory, reg) {
+                if (reg->base < (phys_addr_t)0xffffffff) {
+                        flags |= __GFP_DMA;
+                        break;
+                }
+        }
+        return __get_free_pages(flags, order);
+}
+
 enum dma_cache_op {
         DMA_UNMAP,
         DMA_MAP,
diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h
@@ -50,7 +50,7 @@ extern const struct hypervisor_x86 *x86_hyper;
 /* Recognized hypervisors */
 extern const struct hypervisor_x86 x86_hyper_vmware;
 extern const struct hypervisor_x86 x86_hyper_ms_hyperv;
-extern const struct hypervisor_x86 x86_hyper_xen_hvm;
+extern const struct hypervisor_x86 x86_hyper_xen;
 extern const struct hypervisor_x86 x86_hyper_kvm;
 
 extern void init_hypervisor(struct cpuinfo_x86 *c);
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
@@ -269,4 +269,9 @@ static inline bool xen_arch_need_swiotlb(struct device *dev,
         return false;
 }
 
+static inline unsigned long xen_get_swiotlb_free_pages(unsigned int order)
+{
+        return __get_free_pages(__GFP_NOWARN, order);
+}
+
 #endif /* _ASM_X86_XEN_PAGE_H */
diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c
@@ -27,8 +27,8 @@
 
 static const __initconst struct hypervisor_x86 * const hypervisors[] =
 {
-#ifdef CONFIG_XEN_PVHVM
-        &x86_hyper_xen_hvm,
+#ifdef CONFIG_XEN
+        &x86_hyper_xen,
 #endif
         &x86_hyper_vmware,
         &x86_hyper_ms_hyperv,
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
@@ -1760,6 +1760,9 @@ static struct notifier_block xen_hvm_cpu_notifier = {
 
 static void __init xen_hvm_guest_init(void)
 {
+        if (xen_pv_domain())
+                return;
+
         init_hvm_pv_info();
 
         xen_hvm_init_shared_info();
@@ -1775,6 +1778,7 @@ static void __init xen_hvm_guest_init(void)
         xen_hvm_init_time_ops();
         xen_hvm_init_mmu_ops();
 }
+#endif
 
 static bool xen_nopv = false;
 static __init int xen_parse_nopv(char *arg)
@@ -1784,14 +1788,11 @@ static __init int xen_parse_nopv(char *arg)
 }
 early_param("xen_nopv", xen_parse_nopv);
 
-static uint32_t __init xen_hvm_platform(void)
+static uint32_t __init xen_platform(void)
 {
         if (xen_nopv)
                 return 0;
 
-        if (xen_pv_domain())
-                return 0;
-
         return xen_cpuid_base();
 }
 
@@ -1809,11 +1810,19 @@ bool xen_hvm_need_lapic(void)
 }
 EXPORT_SYMBOL_GPL(xen_hvm_need_lapic);
 
-const struct hypervisor_x86 x86_hyper_xen_hvm __refconst = {
-        .name = "Xen HVM",
-        .detect = xen_hvm_platform,
+static void xen_set_cpu_features(struct cpuinfo_x86 *c)
+{
+        if (xen_pv_domain())
+                clear_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
+}
+
+const struct hypervisor_x86 x86_hyper_xen = {
+        .name = "Xen",
+        .detect = xen_platform,
+#ifdef CONFIG_XEN_PVHVM
         .init_platform = xen_hvm_guest_init,
+#endif
         .x2apic_available = xen_x2apic_para_available,
+        .set_cpu_features = xen_set_cpu_features,
 };
-EXPORT_SYMBOL(x86_hyper_xen_hvm);
-#endif
+EXPORT_SYMBOL(x86_hyper_xen);
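
For context on the X86_BUG_SYSRET_SS_ATTRS fix: the .set_cpu_features hook wired up above is called once per CPU by the generic hypervisor-detection core. A simplified sketch of that call site, assuming the 4.1-era arch/x86/kernel/cpu/hypervisor.c flow (not part of this diff):

/* Simplified sketch of the generic dispatch that invokes the
 * .set_cpu_features hook for every CPU; on a Xen PV guest this
 * ends up clearing X86_BUG_SYSRET_SS_ATTRS via xen_set_cpu_features(). */
void init_hypervisor(struct cpuinfo_x86 *c)
{
        if (x86_hyper && x86_hyper->set_cpu_features)
                x86_hyper->set_cpu_features(c);
}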
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
@@ -88,7 +88,17 @@ static void xen_vcpu_notify_restore(void *data)
         tick_resume_local();
 }
 
+static void xen_vcpu_notify_suspend(void *data)
+{
+        tick_suspend_local();
+}
+
 void xen_arch_resume(void)
 {
         on_each_cpu(xen_vcpu_notify_restore, NULL, 1);
 }
+
+void xen_arch_suspend(void)
+{
+        on_each_cpu(xen_vcpu_notify_suspend, NULL, 1);
+}
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
@@ -265,17 +265,6 @@ static void put_persistent_gnt(struct xen_blkif *blkif,
         atomic_dec(&blkif->persistent_gnt_in_use);
 }
 
-static void free_persistent_gnts_unmap_callback(int result,
-                                                struct gntab_unmap_queue_data *data)
-{
-        struct completion *c = data->data;
-
-        /* BUG_ON used to reproduce existing behaviour,
-           but is this the best way to deal with this? */
-        BUG_ON(result);
-        complete(c);
-}
-
 static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
                                  unsigned int num)
 {
@@ -285,12 +274,7 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
         struct rb_node *n;
         int segs_to_unmap = 0;
         struct gntab_unmap_queue_data unmap_data;
-        struct completion unmap_completion;
 
-        init_completion(&unmap_completion);
-
-        unmap_data.data = &unmap_completion;
-        unmap_data.done = &free_persistent_gnts_unmap_callback;
         unmap_data.pages = pages;
         unmap_data.unmap_ops = unmap;
         unmap_data.kunmap_ops = NULL;
@@ -310,8 +294,7 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
                         !rb_next(&persistent_gnt->node)) {
 
                         unmap_data.count = segs_to_unmap;
-                        gnttab_unmap_refs_async(&unmap_data);
-                        wait_for_completion(&unmap_completion);
+                        BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
 
                         put_free_pages(blkif, pages, segs_to_unmap);
                         segs_to_unmap = 0;
@@ -329,8 +312,13 @@ void xen_blkbk_unmap_purged_grants(struct work_struct *work)
         struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
         struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
         struct persistent_gnt *persistent_gnt;
-        int ret, segs_to_unmap = 0;
+        int segs_to_unmap = 0;
         struct xen_blkif *blkif = container_of(work, typeof(*blkif), persistent_purge_work);
+        struct gntab_unmap_queue_data unmap_data;
+
+        unmap_data.pages = pages;
+        unmap_data.unmap_ops = unmap;
+        unmap_data.kunmap_ops = NULL;
 
         while(!list_empty(&blkif->persistent_purge_list)) {
                 persistent_gnt = list_first_entry(&blkif->persistent_purge_list,
@@ -346,17 +334,16 @@ void xen_blkbk_unmap_purged_grants(struct work_struct *work)
                 pages[segs_to_unmap] = persistent_gnt->page;
 
                 if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
-                        ret = gnttab_unmap_refs(unmap, NULL, pages,
-                                segs_to_unmap);
-                        BUG_ON(ret);
+                        unmap_data.count = segs_to_unmap;
+                        BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
                         put_free_pages(blkif, pages, segs_to_unmap);
                         segs_to_unmap = 0;
                 }
                 kfree(persistent_gnt);
         }
         if (segs_to_unmap > 0) {
-                ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap);
-                BUG_ON(ret);
+                unmap_data.count = segs_to_unmap;
+                BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
                 put_free_pages(blkif, pages, segs_to_unmap);
         }
 }
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
@@ -299,11 +299,27 @@ static int xen_initial_domain_console_init(void)
         return 0;
 }
 
+static void xen_console_update_evtchn(struct xencons_info *info)
+{
+        if (xen_hvm_domain()) {
+                uint64_t v;
+                int err;
+
+                err = hvm_get_parameter(HVM_PARAM_CONSOLE_EVTCHN, &v);
+                if (!err && v)
+                        info->evtchn = v;
+        } else
+                info->evtchn = xen_start_info->console.domU.evtchn;
+}
+
 void xen_console_resume(void)
 {
         struct xencons_info *info = vtermno_to_xencons(HVC_COOKIE);
-        if (info != NULL && info->irq)
+
+        if (info != NULL && info->irq) {
+                if (!xen_initial_domain())
+                        xen_console_update_evtchn(info);
                 rebind_evtchn_irq(info->evtchn, info->irq);
+        }
 }
 
 static void xencons_disconnect_backend(struct xencons_info *info)
diff --git a/drivers/xen/events/events_2l.c b/drivers/xen/events/events_2l.c
@@ -345,6 +345,15 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
         return IRQ_HANDLED;
 }
 
+static void evtchn_2l_resume(void)
+{
+        int i;
+
+        for_each_online_cpu(i)
+                memset(per_cpu(cpu_evtchn_mask, i), 0, sizeof(xen_ulong_t) *
+                                EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD);
+}
+
 static const struct evtchn_ops evtchn_ops_2l = {
         .max_channels = evtchn_2l_max_channels,
         .nr_channels = evtchn_2l_max_channels,
@@ -356,6 +365,7 @@ static const struct evtchn_ops evtchn_ops_2l = {
         .mask = evtchn_2l_mask,
         .unmask = evtchn_2l_unmask,
         .handle_events = evtchn_2l_handle_events,
+        .resume = evtchn_2l_resume,
 };
 
 void __init xen_evtchn_2l_init(void)
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
@@ -529,8 +529,8 @@ static unsigned int __startup_pirq(unsigned int irq)
         if (rc)
                 goto err;
 
-        bind_evtchn_to_cpu(evtchn, 0);
         info->evtchn = evtchn;
+        bind_evtchn_to_cpu(evtchn, 0);
 
         rc = xen_evtchn_port_setup(info);
         if (rc)
@@ -1279,8 +1279,9 @@ void rebind_evtchn_irq(int evtchn, int irq)
 
         mutex_unlock(&irq_mapping_update_lock);
 
-        /* new event channels are always bound to cpu 0 */
-        irq_set_affinity(irq, cpumask_of(0));
+        bind_evtchn_to_cpu(evtchn, info->cpu);
+        /* This will be deferred until interrupt is processed */
+        irq_set_affinity(irq, cpumask_of(info->cpu));
 
         /* Unmask the event channel. */
         enable_irq(irq);
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
@@ -327,30 +327,10 @@ static int map_grant_pages(struct grant_map *map)
         return err;
 }
 
-struct unmap_grant_pages_callback_data
-{
-        struct completion completion;
-        int result;
-};
-
-static void unmap_grant_callback(int result,
-                                 struct gntab_unmap_queue_data *data)
-{
-        struct unmap_grant_pages_callback_data* d = data->data;
-
-        d->result = result;
-        complete(&d->completion);
-}
-
 static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
 {
         int i, err = 0;
         struct gntab_unmap_queue_data unmap_data;
-        struct unmap_grant_pages_callback_data data;
-
-        init_completion(&data.completion);
-        unmap_data.data = &data;
-        unmap_data.done= &unmap_grant_callback;
 
         if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
                 int pgno = (map->notify.addr >> PAGE_SHIFT);
@@ -367,11 +347,9 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
         unmap_data.pages = map->pages + offset;
         unmap_data.count = pages;
 
-        gnttab_unmap_refs_async(&unmap_data);
-
-        wait_for_completion(&data.completion);
-        if (data.result)
-                return data.result;
+        err = gnttab_unmap_refs_sync(&unmap_data);
+        if (err)
+                return err;
 
         for (i = 0; i < pages; i++) {
                 if (map->unmap_ops[offset+i].status)
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
@@ -123,6 +123,11 @@
         int (*query_foreign_access)(grant_ref_t ref);
 };
 
+struct unmap_refs_callback_data {
+        struct completion completion;
+        int result;
+};
+
 static struct gnttab_ops *gnttab_interface;
 
 static int grant_table_version;
@@ -863,6 +868,29 @@ void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
 }
 EXPORT_SYMBOL_GPL(gnttab_unmap_refs_async);
 
+static void unmap_refs_callback(int result,
+                struct gntab_unmap_queue_data *data)
+{
+        struct unmap_refs_callback_data *d = data->data;
+
+        d->result = result;
+        complete(&d->completion);
+}
+
+int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item)
+{
+        struct unmap_refs_callback_data data;
+
+        init_completion(&data.completion);
+        item->data = &data;
+        item->done = &unmap_refs_callback;
+        gnttab_unmap_refs_async(item);
+        wait_for_completion(&data.completion);
+
+        return data.result;
+}
+EXPORT_SYMBOL_GPL(gnttab_unmap_refs_sync);
+
 static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
 {
         int rc;
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
@@ -131,6 +131,8 @@ static void do_suspend(void)
                 goto out_resume;
         }
 
+        xen_arch_suspend();
+
         si.cancelled = 1;
 
         err = stop_machine(xen_suspend, &si, cpumask_of(0));
@@ -148,11 +150,12 @@ static void do_suspend(void)
                 si.cancelled = 1;
         }
 
+        xen_arch_resume();
+
 out_resume:
-        if (!si.cancelled) {
-                xen_arch_resume();
+        if (!si.cancelled)
                 xs_resume();
-        } else
+        else
                 xs_suspend_cancel();
 
         dpm_resume_end(si.cancelled ? PMSG_THAW : PMSG_RESTORE);
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
@@ -235,7 +235,7 @@ int __ref xen_swiotlb_init(int verbose, bool early)
 #define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
 #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
         while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
-                xen_io_tlb_start = (void *)__get_free_pages(__GFP_NOWARN, order);
+                xen_io_tlb_start = (void *)xen_get_swiotlb_free_pages(order);
                 if (xen_io_tlb_start)
                         break;
                 order--;
diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
@@ -16,8 +16,8 @@
 #include "conf_space.h"
 #include "conf_space_quirks.h"
 
-bool permissive;
-module_param(permissive, bool, 0644);
+bool xen_pcibk_permissive;
+module_param_named(permissive, xen_pcibk_permissive, bool, 0644);
 
 /* This is where xen_pcibk_read_config_byte, xen_pcibk_read_config_word,
  * xen_pcibk_write_config_word, and xen_pcibk_write_config_byte are created. */
@@ -262,7 +262,7 @@ int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value)
                  * This means that some fields may still be read-only because
                  * they have entries in the config_field list that intercept
                  * the write and do nothing. */
-                if (dev_data->permissive || permissive) {
+                if (dev_data->permissive || xen_pcibk_permissive) {
                         switch (size) {
                         case 1:
                                 err = pci_write_config_byte(dev, offset,
diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
@@ -64,7 +64,7 @@ struct config_field_entry {
         void *data;
 };
 
-extern bool permissive;
+extern bool xen_pcibk_permissive;
 
 #define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset)
diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c
@@ -118,7 +118,7 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
 
         cmd->val = value;
 
-        if (!permissive && (!dev_data || !dev_data->permissive))
+        if (!xen_pcibk_permissive && (!dev_data || !dev_data->permissive))
                 return 0;
 
         /* Only allow the guest to control certain bits. */
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
@@ -57,6 +57,7 @@
 #include <xen/xen.h>
 #include <xen/xenbus.h>
 #include <xen/events.h>
+#include <xen/xen-ops.h>
 #include <xen/page.h>
 
 #include <xen/hvm.h>
@@ -735,6 +736,30 @@ static int __init xenstored_local_init(void)
         return err;
 }
 
+static int xenbus_resume_cb(struct notifier_block *nb,
+                            unsigned long action, void *data)
+{
+        int err = 0;
+
+        if (xen_hvm_domain()) {
+                uint64_t v;
+
+                err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
+                if (!err && v)
+                        xen_store_evtchn = v;
+                else
+                        pr_warn("Cannot update xenstore event channel: %d\n",
+                                err);
+        } else
+                xen_store_evtchn = xen_start_info->store_evtchn;
+
+        return err;
+}
+
+static struct notifier_block xenbus_resume_nb = {
+        .notifier_call = xenbus_resume_cb,
+};
+
 static int __init xenbus_init(void)
 {
         int err = 0;
@@ -793,6 +818,10 @@ static int __init xenbus_init(void)
                 goto out_error;
         }
 
+        if ((xen_store_domain_type != XS_LOCAL) &&
+            (xen_store_domain_type != XS_UNKNOWN))
+                xen_resume_notifier_register(&xenbus_resume_nb);
+
 #ifdef CONFIG_XEN_COMPAT_XENFS
         /*
          * Create xenfs mountpoint in /proc for compatibility with
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
@@ -191,6 +191,7 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
                       struct gnttab_unmap_grant_ref *kunmap_ops,
                       struct page **pages, unsigned int count);
 void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item);
+int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item);
 
 
 /* Perform a batch of grant map/copy operations. Retry every batch slot
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
@@ -13,6 +13,7 @@ void xen_arch_post_suspend(int suspend_cancelled);
 
 void xen_timer_resume(void);
 void xen_arch_resume(void);
+void xen_arch_suspend(void);
 
 void xen_resume_notifier_register(struct notifier_block *nb);
 void xen_resume_notifier_unregister(struct notifier_block *nb);
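
The xenbus_probe.c hunk above is a complete in-tree example of the resume-notifier pattern these declarations support. A stripped-down consumer sketch (all names hypothetical, for illustration only):

#include <linux/notifier.h>
#include <xen/xen-ops.h>

/* Hypothetical consumer of the resume notifier API declared above,
 * modeled on the xenbus_probe.c hunk; not part of this merge. */
static int example_resume_cb(struct notifier_block *nb,
                             unsigned long action, void *data)
{
        /* Re-read any state the toolstack may have changed across
         * suspend/resume, e.g. event channel numbers. */
        return 0;
}

static struct notifier_block example_resume_nb = {
        .notifier_call = example_resume_cb,
};

static int __init example_init(void)
{
        xen_resume_notifier_register(&example_resume_nb);
        return 0;
}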