Commit ab6e1f37 authored by Linus Torvalds

Merge tag 'for-linus-4.20a-rc2-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen fixes from Juergen Gross:
 "Several fixes, mostly for rather recent regressions when running under
  Xen"

* tag 'for-linus-4.20a-rc2-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen: remove size limit of privcmd-buf mapping interface
  xen: fix xen_qlock_wait()
  x86/xen: fix pv boot
  xen-blkfront: fix kernel panic with negotiate_mq error path
  xen/grant-table: Fix incorrect gnttab_dma_free_pages() pr_debug message
  CONFIG_XEN_PV breaks xen_create_contiguous_region on ARM
parents 35c55685 3941552a
@@ -9,7 +9,7 @@
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/device.h> #include <linux/device.h>
#include <linux/uaccess.h> #include <asm/extable.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
@@ -93,12 +93,39 @@ clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
*/ */
static inline int xen_safe_write_ulong(unsigned long *addr, unsigned long val) static inline int xen_safe_write_ulong(unsigned long *addr, unsigned long val)
{ {
return __put_user(val, (unsigned long __user *)addr); int ret = 0;
asm volatile("1: mov %[val], %[ptr]\n"
"2:\n"
".section .fixup, \"ax\"\n"
"3: sub $1, %[ret]\n"
" jmp 2b\n"
".previous\n"
_ASM_EXTABLE(1b, 3b)
: [ret] "+r" (ret), [ptr] "=m" (*addr)
: [val] "r" (val));
return ret;
} }
static inline int xen_safe_read_ulong(unsigned long *addr, unsigned long *val) static inline int xen_safe_read_ulong(const unsigned long *addr,
unsigned long *val)
{ {
return __get_user(*val, (unsigned long __user *)addr); int ret = 0;
unsigned long rval = ~0ul;
asm volatile("1: mov %[ptr], %[rval]\n"
"2:\n"
".section .fixup, \"ax\"\n"
"3: sub $1, %[ret]\n"
" jmp 2b\n"
".previous\n"
_ASM_EXTABLE(1b, 3b)
: [ret] "+r" (ret), [rval] "+r" (rval)
: [ptr] "m" (*addr));
*val = rval;
return ret;
} }
#ifdef CONFIG_XEN_PV #ifdef CONFIG_XEN_PV
......
@@ -656,8 +656,7 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
/* /*
* The interface requires atomic updates on p2m elements. * The interface requires atomic updates on p2m elements.
* xen_safe_write_ulong() is using __put_user which does an atomic * xen_safe_write_ulong() is using an atomic store via asm().
* store via asm().
*/ */
if (likely(!xen_safe_write_ulong(xen_p2m_addr + pfn, mfn))) if (likely(!xen_safe_write_ulong(xen_p2m_addr + pfn, mfn)))
return true; return true;
......
@@ -9,6 +9,7 @@
#include <linux/log2.h> #include <linux/log2.h>
#include <linux/gfp.h> #include <linux/gfp.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/atomic.h>
#include <asm/paravirt.h> #include <asm/paravirt.h>
#include <asm/qspinlock.h> #include <asm/qspinlock.h>
@@ -21,6 +22,7 @@
static DEFINE_PER_CPU(int, lock_kicker_irq) = -1; static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
static DEFINE_PER_CPU(char *, irq_name); static DEFINE_PER_CPU(char *, irq_name);
static DEFINE_PER_CPU(atomic_t, xen_qlock_wait_nest);
static bool xen_pvspin = true; static bool xen_pvspin = true;
static void xen_qlock_kick(int cpu) static void xen_qlock_kick(int cpu)
@@ -39,25 +41,25 @@ static void xen_qlock_kick(int cpu)
*/ */
static void xen_qlock_wait(u8 *byte, u8 val) static void xen_qlock_wait(u8 *byte, u8 val)
{ {
unsigned long flags;
int irq = __this_cpu_read(lock_kicker_irq); int irq = __this_cpu_read(lock_kicker_irq);
atomic_t *nest_cnt = this_cpu_ptr(&xen_qlock_wait_nest);
/* If kicker interrupts not initialized yet, just spin */ /* If kicker interrupts not initialized yet, just spin */
if (irq == -1 || in_nmi()) if (irq == -1 || in_nmi())
return; return;
/* Guard against reentry. */ /* Detect reentry. */
local_irq_save(flags); atomic_inc(nest_cnt);
/* If irq pending already clear it. */ /* If irq pending already and no nested call clear it. */
if (xen_test_irq_pending(irq)) { if (atomic_read(nest_cnt) == 1 && xen_test_irq_pending(irq)) {
xen_clear_irq_pending(irq); xen_clear_irq_pending(irq);
} else if (READ_ONCE(*byte) == val) { } else if (READ_ONCE(*byte) == val) {
/* Block until irq becomes pending (or a spurious wakeup) */ /* Block until irq becomes pending (or a spurious wakeup) */
xen_poll_irq(irq); xen_poll_irq(irq);
} }
local_irq_restore(flags); atomic_dec(nest_cnt);
} }
static irqreturn_t dummy_handler(int irq, void *dev_id) static irqreturn_t dummy_handler(int irq, void *dev_id)
......
@@ -1919,6 +1919,7 @@ static int negotiate_mq(struct blkfront_info *info)
GFP_KERNEL); GFP_KERNEL);
if (!info->rinfo) { if (!info->rinfo) {
xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure"); xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure");
info->nr_rings = 0;
return -ENOMEM; return -ENOMEM;
} }
......
@@ -914,7 +914,7 @@ int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args)
ret = xenmem_reservation_increase(args->nr_pages, args->frames); ret = xenmem_reservation_increase(args->nr_pages, args->frames);
if (ret != args->nr_pages) { if (ret != args->nr_pages) {
pr_debug("Failed to decrease reservation for DMA buffer\n"); pr_debug("Failed to increase reservation for DMA buffer\n");
ret = -EFAULT; ret = -EFAULT;
} else { } else {
ret = 0; ret = 0;
......
@@ -21,15 +21,9 @@
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
static unsigned int limit = 64;
module_param(limit, uint, 0644);
MODULE_PARM_DESC(limit, "Maximum number of pages that may be allocated by "
"the privcmd-buf device per open file");
struct privcmd_buf_private { struct privcmd_buf_private {
struct mutex lock; struct mutex lock;
struct list_head list; struct list_head list;
unsigned int allocated;
}; };
struct privcmd_buf_vma_private { struct privcmd_buf_vma_private {
@@ -60,13 +54,10 @@ static void privcmd_buf_vmapriv_free(struct privcmd_buf_vma_private *vma_priv)
{ {
unsigned int i; unsigned int i;
vma_priv->file_priv->allocated -= vma_priv->n_pages;
list_del(&vma_priv->list); list_del(&vma_priv->list);
for (i = 0; i < vma_priv->n_pages; i++) for (i = 0; i < vma_priv->n_pages; i++)
if (vma_priv->pages[i]) __free_page(vma_priv->pages[i]);
__free_page(vma_priv->pages[i]);
kfree(vma_priv); kfree(vma_priv);
} }
@@ -146,8 +137,7 @@ static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma)
unsigned int i; unsigned int i;
int ret = 0; int ret = 0;
if (!(vma->vm_flags & VM_SHARED) || count > limit || if (!(vma->vm_flags & VM_SHARED))
file_priv->allocated + count > limit)
return -EINVAL; return -EINVAL;
vma_priv = kzalloc(sizeof(*vma_priv) + count * sizeof(void *), vma_priv = kzalloc(sizeof(*vma_priv) + count * sizeof(void *),
@@ -155,19 +145,15 @@ static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma)
if (!vma_priv) if (!vma_priv)
return -ENOMEM; return -ENOMEM;
vma_priv->n_pages = count; for (i = 0; i < count; i++) {
count = 0;
for (i = 0; i < vma_priv->n_pages; i++) {
vma_priv->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO); vma_priv->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!vma_priv->pages[i]) if (!vma_priv->pages[i])
break; break;
count++; vma_priv->n_pages++;
} }
mutex_lock(&file_priv->lock); mutex_lock(&file_priv->lock);
file_priv->allocated += count;
vma_priv->file_priv = file_priv; vma_priv->file_priv = file_priv;
vma_priv->users = 1; vma_priv->users = 1;
......
@@ -42,16 +42,12 @@ int xen_setup_shutdown_event(void);
extern unsigned long *xen_contiguous_bitmap; extern unsigned long *xen_contiguous_bitmap;
#ifdef CONFIG_XEN_PV #if defined(CONFIG_XEN_PV) || defined(CONFIG_ARM) || defined(CONFIG_ARM64)
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order, int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
unsigned int address_bits, unsigned int address_bits,
dma_addr_t *dma_handle); dma_addr_t *dma_handle);
void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order); void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
unsigned int domid, bool no_translate, struct page **pages);
#else #else
static inline int xen_create_contiguous_region(phys_addr_t pstart, static inline int xen_create_contiguous_region(phys_addr_t pstart,
unsigned int order, unsigned int order,
@@ -63,7 +59,13 @@ static inline int xen_create_contiguous_region(phys_addr_t pstart,
static inline void xen_destroy_contiguous_region(phys_addr_t pstart, static inline void xen_destroy_contiguous_region(phys_addr_t pstart,
unsigned int order) { } unsigned int order) { }
#endif
#if defined(CONFIG_XEN_PV)
int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
unsigned int domid, bool no_translate, struct page **pages);
#else
static inline int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr, static inline int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
xen_pfn_t *pfn, int nr, int *err_ptr, xen_pfn_t *pfn, int nr, int *err_ptr,
pgprot_t prot, unsigned int domid, pgprot_t prot, unsigned int domid,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment