Commit dc021644 authored by Dave Airlie

Merge branch 'core/mutexes' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip into drm-next

Merge in the tip core/mutexes branch for future GPU driver use.

Ingo will send this branch to Linus prior to drm-next.

* 'core/mutexes' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (24 commits)
  locking-selftests: Handle unexpected failures more strictly
  mutex: Add more w/w tests to test EDEADLK path handling
  mutex: Add more tests to lib/locking-selftest.c
  mutex: Add w/w tests to lib/locking-selftest.c
  mutex: Add w/w mutex slowpath debugging
  mutex: Add support for wound/wait style locks
  arch: Make __mutex_fastpath_lock_retval return whether fastpath succeeded or not
  powerpc/pci: Fix boot panic on mpc83xx (regression)
  s390/ipl: Fix FCP WWPN and LUN format strings for read
  fs: fix new splice.c kernel-doc warning
  spi/pxa2xx: fix memory corruption due to wrong size used in devm_kzalloc()
  s390/mem_detect: fix memory hole handling
  s390/dma: support debug_dma_mapping_error
  s390/dma: fix mapping_error detection
  s390/irq: Only define synchronize_irq() on SMP
  Input: xpad - fix for "Mad Catz Street Fighter IV FightPad" controllers
  Input: wacom - add a new stylus (0x100802) for Intuos5 and Cintiqs
  spi/pxa2xx: use GFP_ATOMIC in sg table allocation
  fuse: hold i_mutex in fuse_file_fallocate()
  Input: add missing dependencies on CONFIG_HAS_IOMEM
  ...
parents 4300a0f8 166989e3
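
The headline feature of the merged branch is the wound/wait ("w/w") mutex API added by "mutex: Add support for wound/wait style locks". As a rough illustration of the -EDEADLK backoff pattern that GPU drivers are expected to follow (and that the new selftests exercise), here is a hedged sketch; demo_class, demo_lock_pair and the two-lock scenario are invented for this example, and at merge time the API lives in <linux/mutex.h> rather than a separate ww_mutex.h:

#include <linux/kernel.h>	/* swap() */
#include <linux/mutex.h>	/* ww_mutex API location at merge time (assumption) */

static DEFINE_WW_CLASS(demo_class);

/*
 * Acquire two ww_mutexes in either order, backing off on -EDEADLK.
 * On success both locks are held; the caller unlocks them and calls
 * ww_acquire_fini(ctx) when the transaction is finished.
 */
static int demo_lock_pair(struct ww_mutex *a, struct ww_mutex *b,
			  struct ww_acquire_ctx *ctx)
{
	int ret;

	ww_acquire_init(ctx, &demo_class);

	ret = ww_mutex_lock(a, ctx);
	if (ret == -EDEADLK) {
		/* Nothing else is held in this context yet, so just wait. */
		ww_mutex_lock_slow(a, ctx);
		ret = 0;
	}
	if (ret)
		goto err;

	ret = ww_mutex_lock(b, ctx);
	while (ret == -EDEADLK) {
		/*
		 * An older transaction owns b: back off by dropping a,
		 * sleep until b is released, then retry with the roles
		 * swapped so the loop reacquires the remaining lock.
		 */
		ww_mutex_unlock(a);
		ww_mutex_lock_slow(b, ctx);
		swap(a, b);
		ret = ww_mutex_lock(b, ctx);
	}
	if (ret)
		goto err_unlock;

	ww_acquire_done(ctx);	/* no further locks will be taken under ctx */
	return 0;

err_unlock:
	ww_mutex_unlock(a);
err:
	ww_acquire_fini(ctx);
	return ret;
}

The CONFIG_DEBUG_WW_MUTEX_SLOWPATH option added further down injects extra -EDEADLK returns precisely to force callers through a backoff path like the loop above.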
@@ -29,17 +29,15 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
  * __mutex_fastpath_lock_retval - try to take the lock by moving the count
  * from 1 to a 0 value
  * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
  *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if
- * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
- * or anything the slow path function returns.
+ * Change the count from 1 to a value lower than 1. This function returns 0
+ * if the fastpath succeeds, or -1 otherwise.
  */
 static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count)
 {
         if (unlikely(ia64_fetchadd4_acq(count, -1) != 1))
-                return fail_fn(count);
+                return -1;
         return 0;
 }
...
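
The hunk above, and the matching per-architecture hunks below, drop the fail_fn argument from the lock fastpath: the helper now only reports 0 (fastpath took the lock) or -1, and the generic mutex code chooses the slowpath itself. A sketch of the caller side, modeled loosely on kernel/mutex.c after "arch: Make __mutex_fastpath_lock_retval return whether fastpath succeeded or not" (shown for context, not part of this diff):

/* Generic code decides which slowpath to take; the arch helper no longer does. */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval(&lock->count);
	if (likely(!ret)) {
		/* Fastpath grabbed the lock; just record ownership. */
		mutex_set_owner(lock);
		return 0;
	}
	/* Fastpath failed (-1): fall back to the interruptible slowpath. */
	return __mutex_lock_interruptible_slowpath(lock);
}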
@@ -82,17 +82,15 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
  * __mutex_fastpath_lock_retval - try to take the lock by moving the count
  * from 1 to a 0 value
  * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
  *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if
- * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
- * or anything the slow path function returns.
+ * Change the count from 1 to a value lower than 1. This function returns 0
+ * if the fastpath succeeds, or -1 otherwise.
  */
 static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count)
 {
         if (unlikely(__mutex_dec_return_lock(count) < 0))
-                return fail_fn(count);
+                return -1;
         return 0;
 }
...
@@ -97,22 +97,14 @@ static int fsl_indirect_read_config(struct pci_bus *bus, unsigned int devfn,
         return indirect_read_config(bus, devfn, offset, len, val);
 }

-static struct pci_ops fsl_indirect_pci_ops =
+#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
+
+static struct pci_ops fsl_indirect_pcie_ops =
 {
         .read = fsl_indirect_read_config,
         .write = indirect_write_config,
 };

-static void __init fsl_setup_indirect_pci(struct pci_controller* hose,
-                                          resource_size_t cfg_addr,
-                                          resource_size_t cfg_data, u32 flags)
-{
-        setup_indirect_pci(hose, cfg_addr, cfg_data, flags);
-        hose->ops = &fsl_indirect_pci_ops;
-}
-
-#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
-
 #define MAX_PHYS_ADDR_BITS 40
 static u64 pci64_dma_offset = 1ull << MAX_PHYS_ADDR_BITS;
@@ -504,13 +496,15 @@ int __init fsl_add_bridge(struct platform_device *pdev, int is_primary)
         if (!hose->private_data)
                 goto no_bridge;

-        fsl_setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4,
-                               PPC_INDIRECT_TYPE_BIG_ENDIAN);
+        setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4,
+                           PPC_INDIRECT_TYPE_BIG_ENDIAN);

         if (in_be32(&pci->block_rev1) < PCIE_IP_REV_3_0)
                 hose->indirect_type |= PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK;

         if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
+                /* use fsl_indirect_read_config for PCIe */
+                hose->ops = &fsl_indirect_pcie_ops;
+
                 /* For PCIE read HEADER_TYPE to identify controler mode */
                 early_read_config_byte(hose, 0, 0, PCI_HEADER_TYPE, &hdr_type);
                 if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE)
@@ -814,8 +808,8 @@ int __init mpc83xx_add_bridge(struct device_node *dev)
                 if (ret)
                         goto err0;
         } else {
-                fsl_setup_indirect_pci(hose, rsrc_cfg.start,
-                                       rsrc_cfg.start + 4, 0);
+                setup_indirect_pci(hose, rsrc_cfg.start,
+                                   rsrc_cfg.start + 4, 0);
         }

         printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx. "
...
@@ -50,9 +50,10 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
         struct dma_map_ops *dma_ops = get_dma_ops(dev);

+        debug_dma_mapping_error(dev, dma_addr);
         if (dma_ops->mapping_error)
                 return dma_ops->mapping_error(dev, dma_addr);
-        return (dma_addr == 0UL);
+        return (dma_addr == DMA_ERROR_CODE);
 }

 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
...
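
The dma_mapping_error() hunk above adds the debug_dma_mapping_error() hook so that CONFIG_DMA_API_DEBUG can warn when a driver never checks its mapping. A minimal, hypothetical caller for context (demo_map_buffer and its arguments are invented for this sketch):

#include <linux/dma-mapping.h>

static int demo_map_buffer(struct device *dev, void *buf, size_t len,
			   dma_addr_t *handle)
{
	*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *handle))
		return -ENOMEM;	/* nothing was mapped, nothing to unmap */
	return 0;
}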
@@ -754,9 +754,9 @@ static struct bin_attribute sys_reipl_fcp_scp_data_attr = {
         .write = reipl_fcp_scpdata_write,
 };

-DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%016llx\n",
+DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%llx\n",
                    reipl_block_fcp->ipl_info.fcp.wwpn);
-DEFINE_IPL_ATTR_RW(reipl_fcp, lun, "0x%016llx\n", "%016llx\n",
+DEFINE_IPL_ATTR_RW(reipl_fcp, lun, "0x%016llx\n", "%llx\n",
                    reipl_block_fcp->ipl_info.fcp.lun);
 DEFINE_IPL_ATTR_RW(reipl_fcp, bootprog, "%lld\n", "%lld\n",
                    reipl_block_fcp->ipl_info.fcp.bootprog);
@@ -1323,9 +1323,9 @@ static struct shutdown_action __refdata reipl_action = {

 /* FCP dump device attributes */

-DEFINE_IPL_ATTR_RW(dump_fcp, wwpn, "0x%016llx\n", "%016llx\n",
+DEFINE_IPL_ATTR_RW(dump_fcp, wwpn, "0x%016llx\n", "%llx\n",
                    dump_block_fcp->ipl_info.fcp.wwpn);
-DEFINE_IPL_ATTR_RW(dump_fcp, lun, "0x%016llx\n", "%016llx\n",
+DEFINE_IPL_ATTR_RW(dump_fcp, lun, "0x%016llx\n", "%llx\n",
                    dump_block_fcp->ipl_info.fcp.lun);
 DEFINE_IPL_ATTR_RW(dump_fcp, bootprog, "%lld\n", "%lld\n",
                    dump_block_fcp->ipl_info.fcp.bootprog);
...
@@ -312,6 +312,7 @@ void measurement_alert_subclass_unregister(void)
 }
 EXPORT_SYMBOL(measurement_alert_subclass_unregister);

+#ifdef CONFIG_SMP
 void synchronize_irq(unsigned int irq)
 {
         /*
@@ -320,6 +321,7 @@ void synchronize_irq(unsigned int irq)
          */
 }
 EXPORT_SYMBOL_GPL(synchronize_irq);
+#endif

 #ifndef CONFIG_PCI
...
@@ -123,7 +123,8 @@ void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
                         continue;
                 } else if ((addr <= chunk->addr) &&
                            (addr + size >= chunk->addr + chunk->size)) {
-                        memset(chunk, 0 , sizeof(*chunk));
+                        memmove(chunk, chunk + 1, (MEMORY_CHUNKS-i-1) * sizeof(*chunk));
+                        memset(&mem_chunk[MEMORY_CHUNKS-1], 0, sizeof(*chunk));
                 } else if (addr + size < chunk->addr + chunk->size) {
                         chunk->size = chunk->addr + chunk->size - addr - size;
                         chunk->addr = addr + size;
...
@@ -37,7 +37,7 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 }

 static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count)
 {
         int __done, __res;
@@ -51,7 +51,7 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
                 : "t");

         if (unlikely(!__done || __res != 0))
-                __res = fail_fn(count);
+                __res = -1;

         return __res;
 }
...
@@ -42,17 +42,14 @@ do { \
  * __mutex_fastpath_lock_retval - try to take the lock by moving the count
  * from 1 to a 0 value
  * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
  *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
- * wasn't 1 originally. This function returns 0 if the fastpath succeeds,
- * or anything the slow path function returns
+ * Change the count from 1 to a value lower than 1. This function returns 0
+ * if the fastpath succeeds, or -1 otherwise.
  */
-static inline int __mutex_fastpath_lock_retval(atomic_t *count,
-                                               int (*fail_fn)(atomic_t *))
+static inline int __mutex_fastpath_lock_retval(atomic_t *count)
 {
         if (unlikely(atomic_dec_return(count) < 0))
-                return fail_fn(count);
+                return -1;
         else
                 return 0;
 }
...
@@ -37,17 +37,14 @@ do { \
  * __mutex_fastpath_lock_retval - try to take the lock by moving the count
  * from 1 to a 0 value
  * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
  *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if
- * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
- * or anything the slow path function returns
+ * Change the count from 1 to a value lower than 1. This function returns 0
+ * if the fastpath succeeds, or -1 otherwise.
  */
-static inline int __mutex_fastpath_lock_retval(atomic_t *count,
-                                               int (*fail_fn)(atomic_t *))
+static inline int __mutex_fastpath_lock_retval(atomic_t *count)
 {
         if (unlikely(atomic_dec_return(count) < 0))
-                return fail_fn(count);
+                return -1;
         else
                 return 0;
 }
...
@@ -137,7 +137,7 @@ static const struct xpad_device {
         { 0x0738, 0x4540, "Mad Catz Beat Pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
         { 0x0738, 0x4556, "Mad Catz Lynx Wireless Controller", 0, XTYPE_XBOX },
         { 0x0738, 0x4716, "Mad Catz Wired Xbox 360 Controller", 0, XTYPE_XBOX360 },
-        { 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", XTYPE_XBOX360 },
+        { 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
         { 0x0738, 0x4738, "Mad Catz Wired Xbox 360 Controller (SFIV)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
         { 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
         { 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", XTYPE_XBOX360 },
...
@@ -431,6 +431,7 @@ config KEYBOARD_TEGRA

 config KEYBOARD_OPENCORES
         tristate "OpenCores Keyboard Controller"
+        depends on HAS_IOMEM
         help
           Say Y here if you want to use the OpenCores Keyboard Controller
           http://www.opencores.org/project,keyboardcontroller
...
@@ -205,6 +205,7 @@ config SERIO_XILINX_XPS_PS2

 config SERIO_ALTERA_PS2
         tristate "Altera UP PS/2 controller"
+        depends on HAS_IOMEM
         help
           Say Y here if you have Altera University Program PS/2 ports.
...
@@ -363,6 +363,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
                 case 0x140802: /* Intuos4/5 13HD/24HD Classic Pen */
                 case 0x160802: /* Cintiq 13HD Pro Pen */
                 case 0x180802: /* DTH2242 Pen */
+                case 0x100802: /* Intuos4/5 13HD/24HD General Pen */
                         wacom->tool[idx] = BTN_TOOL_PEN;
                         break;
@@ -401,6 +402,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
                 case 0x10080c: /* Intuos4/5 13HD/24HD Art Pen Eraser */
                 case 0x16080a: /* Cintiq 13HD Pro Pen Eraser */
                 case 0x18080a: /* DTH2242 Eraser */
+                case 0x10080a: /* Intuos4/5 13HD/24HD General Pen Eraser */
                         wacom->tool[idx] = BTN_TOOL_RUBBER;
                         break;
...
@@ -116,6 +116,15 @@ static int ttsp_send_command(struct cyttsp *ts, u8 cmd)
         return ttsp_write_block_data(ts, CY_REG_BASE, sizeof(cmd), &cmd);
 }

+static int cyttsp_handshake(struct cyttsp *ts)
+{
+        if (ts->pdata->use_hndshk)
+                return ttsp_send_command(ts,
+                                ts->xy_data.hst_mode ^ CY_HNDSHK_BIT);
+
+        return 0;
+}
+
 static int cyttsp_load_bl_regs(struct cyttsp *ts)
 {
         memset(&ts->bl_data, 0, sizeof(ts->bl_data));
@@ -133,7 +142,7 @@ static int cyttsp_exit_bl_mode(struct cyttsp *ts)
         memcpy(bl_cmd, bl_command, sizeof(bl_command));
         if (ts->pdata->bl_keys)
                 memcpy(&bl_cmd[sizeof(bl_command) - CY_NUM_BL_KEYS],
-                        ts->pdata->bl_keys, sizeof(bl_command));
+                        ts->pdata->bl_keys, CY_NUM_BL_KEYS);

         error = ttsp_write_block_data(ts, CY_REG_BASE,
                                       sizeof(bl_cmd), bl_cmd);
@@ -167,6 +176,10 @@ static int cyttsp_set_operational_mode(struct cyttsp *ts)
         if (error)
                 return error;

+        error = cyttsp_handshake(ts);
+        if (error)
+                return error;
+
         return ts->xy_data.act_dist == CY_ACT_DIST_DFLT ? -EIO : 0;
 }
@@ -188,6 +201,10 @@ static int cyttsp_set_sysinfo_mode(struct cyttsp *ts)
         if (error)
                 return error;

+        error = cyttsp_handshake(ts);
+        if (error)
+                return error;
+
         if (!ts->sysinfo_data.tts_verh && !ts->sysinfo_data.tts_verl)
                 return -EIO;
@@ -344,12 +361,9 @@ static irqreturn_t cyttsp_irq(int irq, void *handle)
                 goto out;

         /* provide flow control handshake */
-        if (ts->pdata->use_hndshk) {
-                error = ttsp_send_command(ts,
-                                ts->xy_data.hst_mode ^ CY_HNDSHK_BIT);
-                if (error)
-                        goto out;
-        }
+        error = cyttsp_handshake(ts);
+        if (error)
+                goto out;

         if (unlikely(ts->state == CY_IDLE_STATE))
                 goto out;
...
@@ -67,8 +67,8 @@ struct cyttsp_xydata {
 /* TTSP System Information interface definition */
 struct cyttsp_sysinfo_data {
         u8 hst_mode;
-        u8 mfg_cmd;
         u8 mfg_stat;
+        u8 mfg_cmd;
         u8 cid[3];
         u8 tt_undef1;
         u8 uid[8];
...
@@ -59,7 +59,7 @@ static int pxa2xx_spi_map_dma_buffer(struct driver_data *drv_data,
                 int ret;

                 sg_free_table(sgt);
-                ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
+                ret = sg_alloc_table(sgt, nents, GFP_ATOMIC);
                 if (ret)
                         return ret;
         }
...
@@ -1075,7 +1075,7 @@ pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
             acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev))
                 return NULL;

-        pdata = devm_kzalloc(&pdev->dev, sizeof(*ssp), GFP_KERNEL);
+        pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
         if (!pdata) {
                 dev_err(&pdev->dev,
                         "failed to allocate memory for platform data\n");
...
@@ -444,7 +444,7 @@ static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
         }

         ret = pm_runtime_get_sync(&sdd->pdev->dev);
-        if (ret != 0) {
+        if (ret < 0) {
                 dev_err(dev, "Failed to enable device: %d\n", ret);
                 goto out_tx;
         }
...
@@ -2470,13 +2470,16 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
                 .mode = mode
         };
         int err;
+        bool lock_inode = !(mode & FALLOC_FL_KEEP_SIZE) ||
+                           (mode & FALLOC_FL_PUNCH_HOLE);

         if (fc->no_fallocate)
                 return -EOPNOTSUPP;

-        if (mode & FALLOC_FL_PUNCH_HOLE) {
+        if (lock_inode) {
                 mutex_lock(&inode->i_mutex);
-                fuse_set_nowrite(inode);
+                if (mode & FALLOC_FL_PUNCH_HOLE)
+                        fuse_set_nowrite(inode);
         }

         req = fuse_get_req_nopages(fc);
@@ -2511,8 +2514,9 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
         fuse_invalidate_attr(inode);

 out:
-        if (mode & FALLOC_FL_PUNCH_HOLE) {
-                fuse_release_nowrite(inode);
+        if (lock_inode) {
+                if (mode & FALLOC_FL_PUNCH_HOLE)
+                        fuse_release_nowrite(inode);
                 mutex_unlock(&inode->i_mutex);
         }
...
@@ -1283,6 +1283,7 @@ static int direct_splice_actor(struct pipe_inode_info *pipe,
  * @in:         file to splice from
  * @ppos:       input file offset
  * @out:        file to splice to
+ * @opos:       output file offset
  * @len:        number of bytes to splice
  * @flags:      splice modifier flags
  *
...
@@ -28,17 +28,15 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
  * __mutex_fastpath_lock_retval - try to take the lock by moving the count
  * from 1 to a 0 value
  * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
  *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if
- * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
- * or anything the slow path function returns.
+ * Change the count from 1 to a value lower than 1. This function returns 0
+ * if the fastpath succeeds, or -1 otherwise.
  */
 static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count)
 {
         if (unlikely(atomic_dec_return(count) < 0))
-                return fail_fn(count);
+                return -1;
         return 0;
 }
...
@@ -11,7 +11,7 @@
 #define _ASM_GENERIC_MUTEX_NULL_H

 #define __mutex_fastpath_lock(count, fail_fn)           fail_fn(count)
-#define __mutex_fastpath_lock_retval(count, fail_fn)    fail_fn(count)
+#define __mutex_fastpath_lock_retval(count)             (-1)
 #define __mutex_fastpath_unlock(count, fail_fn)         fail_fn(count)
 #define __mutex_fastpath_trylock(count, fail_fn)        fail_fn(count)
 #define __mutex_slowpath_needs_to_unlock()              1
...
@@ -39,18 +39,16 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
  * __mutex_fastpath_lock_retval - try to take the lock by moving the count
  * from 1 to a 0 value
  * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
  *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
- * wasn't 1 originally. This function returns 0 if the fastpath succeeds,
- * or anything the slow path function returns
+ * Change the count from 1 to a value lower than 1. This function returns 0
+ * if the fastpath succeeds, or -1 otherwise.
  */
 static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count)
 {
         if (unlikely(atomic_xchg(count, 0) != 1))
                 if (likely(atomic_xchg(count, -1) != 1))
-                        return fail_fn(count);
+                        return -1;
         return 0;
 }
...
@@ -3,6 +3,7 @@

 #include <linux/linkage.h>
 #include <linux/lockdep.h>
+#include <linux/debug_locks.h>

 /*
  * Mutexes - debugging helpers:
...
@@ -547,6 +547,19 @@ config DEBUG_MUTEXES
          This feature allows mutex semantics violations to be detected and
          reported.

+config DEBUG_WW_MUTEX_SLOWPATH
+        bool "Wait/wound mutex debugging: Slowpath testing"
+        depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
+        select DEBUG_LOCK_ALLOC
+        select DEBUG_SPINLOCK
+        select DEBUG_MUTEXES
+        help
+         This feature enables slowpath testing for w/w mutex users by
+         injecting additional -EDEADLK wound/backoff cases. Together with
+         the full mutex checks enabled with (CONFIG_PROVE_LOCKING) this
+         will test all possible w/w mutex interface abuse with the
+         exception of simply not acquiring all the required locks.
+
 config DEBUG_LOCK_ALLOC
         bool "Lock debugging: detect incorrect freeing of live locks"
         depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
...
@@ -30,6 +30,7 @@ EXPORT_SYMBOL_GPL(debug_locks);
  * a locking bug is detected.
  */
 int debug_locks_silent;
+EXPORT_SYMBOL_GPL(debug_locks_silent);

 /*
  * Generic 'turn off all lock debugging' function:
@@ -44,3 +45,4 @@ int debug_locks_off(void)
         }
         return 0;
 }
+EXPORT_SYMBOL_GPL(debug_locks_off);