Commit 3e3ab9cc authored by David S. Miller
parents 868c36dc ba804bb4
@@ -769,7 +769,7 @@ static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
 /*
  * Must be called with kvm->srcu held to avoid races on memslots, and with
- * kvm->lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
+ * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
  */
 static int kvm_s390_vm_start_migration(struct kvm *kvm)
 {
@@ -825,7 +825,7 @@ static int kvm_s390_vm_start_migration(struct kvm *kvm)
 }
 /*
- * Must be called with kvm->lock to avoid races with ourselves and
+ * Must be called with kvm->slots_lock to avoid races with ourselves and
  * kvm_s390_vm_start_migration.
  */
 static int kvm_s390_vm_stop_migration(struct kvm *kvm)
@@ -840,6 +840,8 @@ static int kvm_s390_vm_stop_migration(struct kvm *kvm)
 	if (kvm->arch.use_cmma) {
 		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
+		/* We have to wait for the essa emulation to finish */
+		synchronize_srcu(&kvm->srcu);
 		vfree(mgs->pgste_bitmap);
 	}
 	kfree(mgs);
@@ -849,14 +851,12 @@ static int kvm_s390_vm_stop_migration(struct kvm *kvm)
 static int kvm_s390_vm_set_migration(struct kvm *kvm,
 				     struct kvm_device_attr *attr)
 {
-	int idx, res = -ENXIO;
-	mutex_lock(&kvm->lock);
+	int res = -ENXIO;
+	mutex_lock(&kvm->slots_lock);
 	switch (attr->attr) {
 	case KVM_S390_VM_MIGRATION_START:
-		idx = srcu_read_lock(&kvm->srcu);
 		res = kvm_s390_vm_start_migration(kvm);
-		srcu_read_unlock(&kvm->srcu, idx);
 		break;
 	case KVM_S390_VM_MIGRATION_STOP:
 		res = kvm_s390_vm_stop_migration(kvm);
@@ -864,7 +864,7 @@ static int kvm_s390_vm_set_migration(struct kvm *kvm,
 	default:
 		break;
 	}
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->slots_lock);
 	return res;
 }
@@ -1754,7 +1754,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		r = -EFAULT;
 		if (copy_from_user(&args, argp, sizeof(args)))
 			break;
+		mutex_lock(&kvm->slots_lock);
 		r = kvm_s390_get_cmma_bits(kvm, &args);
+		mutex_unlock(&kvm->slots_lock);
 		if (!r) {
 			r = copy_to_user(argp, &args, sizeof(args));
 			if (r)
@@ -1768,7 +1770,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		r = -EFAULT;
 		if (copy_from_user(&args, argp, sizeof(args)))
 			break;
+		mutex_lock(&kvm->slots_lock);
 		r = kvm_s390_set_cmma_bits(kvm, &args);
+		mutex_unlock(&kvm->slots_lock);
 		break;
 	}
 	default:
...
@@ -71,6 +71,10 @@ nvkm_pci_intr(int irq, void *arg)
 	struct nvkm_pci *pci = arg;
 	struct nvkm_device *device = pci->subdev.device;
 	bool handled = false;
+	if (pci->irq < 0)
+		return IRQ_HANDLED;
 	nvkm_mc_intr_unarm(device);
 	if (pci->msi)
 		pci->func->msi_rearm(pci);
@@ -84,11 +88,6 @@ nvkm_pci_fini(struct nvkm_subdev *subdev, bool suspend)
 {
 	struct nvkm_pci *pci = nvkm_pci(subdev);
-	if (pci->irq >= 0) {
-		free_irq(pci->irq, pci);
-		pci->irq = -1;
-	}
 	if (pci->agp.bridge)
 		nvkm_agp_fini(pci);
@@ -108,8 +107,20 @@ static int
 nvkm_pci_oneinit(struct nvkm_subdev *subdev)
 {
 	struct nvkm_pci *pci = nvkm_pci(subdev);
-	if (pci_is_pcie(pci->pdev))
-		return nvkm_pcie_oneinit(pci);
+	struct pci_dev *pdev = pci->pdev;
+	int ret;
+	if (pci_is_pcie(pci->pdev)) {
+		ret = nvkm_pcie_oneinit(pci);
+		if (ret)
+			return ret;
+	}
+	ret = request_irq(pdev->irq, nvkm_pci_intr, IRQF_SHARED, "nvkm", pci);
+	if (ret)
+		return ret;
+	pci->irq = pdev->irq;
 	return 0;
 }
@@ -117,7 +128,6 @@ static int
 nvkm_pci_init(struct nvkm_subdev *subdev)
 {
 	struct nvkm_pci *pci = nvkm_pci(subdev);
-	struct pci_dev *pdev = pci->pdev;
 	int ret;
 	if (pci->agp.bridge) {
@@ -131,28 +141,34 @@ nvkm_pci_init(struct nvkm_subdev *subdev)
 	if (pci->func->init)
 		pci->func->init(pci);
-	ret = request_irq(pdev->irq, nvkm_pci_intr, IRQF_SHARED, "nvkm", pci);
-	if (ret)
-		return ret;
-	pci->irq = pdev->irq;
 	/* Ensure MSI interrupts are armed, for the case where there are
 	 * already interrupts pending (for whatever reason) at load time.
 	 */
 	if (pci->msi)
 		pci->func->msi_rearm(pci);
-	return ret;
+	return 0;
 }
 static void *
 nvkm_pci_dtor(struct nvkm_subdev *subdev)
 {
 	struct nvkm_pci *pci = nvkm_pci(subdev);
 	nvkm_agp_dtor(pci);
+	if (pci->irq >= 0) {
+		/* free_irq() will call the handler, we use pci->irq == -1
+		 * to signal that it's been torn down and should be a noop.
+		 */
+		int irq = pci->irq;
+		pci->irq = -1;
+		free_irq(irq, pci);
+	}
 	if (pci->msi)
 		pci_disable_msi(pci->pdev);
 	return nvkm_pci(subdev);
 }
...
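The ordering in nvkm_pci_dtor() above matters: on a shared IRQ line, free_irq() can end up invoking the handler one last time, which is why the destructor now publishes pci->irq = -1 *before* freeing the IRQ, and the handler returns early when it sees the sentinel. A minimal userspace sketch of the same publish-then-free pattern (all names hypothetical; plain function calls stand in for the IRQ machinery):

```c
#include <stdio.h>

/* Hypothetical stand-in for the device state; `irq` doubles as a
 * "still registered" flag, with -1 meaning "torn down". */
struct dev_state {
    int irq;
};

/* Stand-in for the interrupt handler: a no-op once irq is -1. */
static int intr(struct dev_state *dev)
{
    if (dev->irq < 0)
        return 0;        /* "handled", but do nothing: device is gone */
    printf("servicing irq %d\n", dev->irq);
    return 1;
}

/* Teardown: publish the sentinel before the final handler call,
 * mirroring pci->irq = -1 ahead of free_irq() in the dtor above. */
static void teardown(struct dev_state *dev)
{
    dev->irq = -1;
    intr(dev);           /* models free_irq() invoking the handler */
}

int main(void)
{
    struct dev_state dev = { .irq = 5 };

    intr(&dev);          /* normal interrupt: serviced */
    teardown(&dev);      /* late invocation: harmless no-op */
    return 0;
}
```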
@@ -146,7 +146,7 @@ vc4_save_hang_state(struct drm_device *dev)
 	struct vc4_exec_info *exec[2];
 	struct vc4_bo *bo;
 	unsigned long irqflags;
-	unsigned int i, j, unref_list_count, prev_idx;
+	unsigned int i, j, k, unref_list_count;
 	kernel_state = kcalloc(1, sizeof(*kernel_state), GFP_KERNEL);
 	if (!kernel_state)
@@ -182,7 +182,7 @@ vc4_save_hang_state(struct drm_device *dev)
 		return;
 	}
-	prev_idx = 0;
+	k = 0;
 	for (i = 0; i < 2; i++) {
 		if (!exec[i])
 			continue;
@@ -197,7 +197,7 @@ vc4_save_hang_state(struct drm_device *dev)
 			WARN_ON(!refcount_read(&bo->usecnt));
 			refcount_inc(&bo->usecnt);
 			drm_gem_object_get(&exec[i]->bo[j]->base);
-			kernel_state->bo[j + prev_idx] = &exec[i]->bo[j]->base;
+			kernel_state->bo[k++] = &exec[i]->bo[j]->base;
 		}
 		list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
@@ -205,12 +205,12 @@ vc4_save_hang_state(struct drm_device *dev)
 			 * because they are naturally unpurgeable.
 			 */
 			drm_gem_object_get(&bo->base.base);
-			kernel_state->bo[j + prev_idx] = &bo->base.base;
-			j++;
+			kernel_state->bo[k++] = &bo->base.base;
 		}
-		prev_idx = j + 1;
 	}
+	WARN_ON_ONCE(k != state->bo_count);
 	if (exec[0])
 		state->start_bin = exec[0]->ct0ca;
 	if (exec[1])
@@ -436,6 +436,19 @@ vc4_flush_caches(struct drm_device *dev)
 		 VC4_SET_FIELD(0xf, V3D_SLCACTL_ICC));
 }
+static void
+vc4_flush_texture_caches(struct drm_device *dev)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+	V3D_WRITE(V3D_L2CACTL,
+		  V3D_L2CACTL_L2CCLR);
+	V3D_WRITE(V3D_SLCACTL,
+		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
+		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC));
+}
 /* Sets the registers for the next job to actually be executed in
  * the hardware.
  *
@@ -474,6 +487,14 @@ vc4_submit_next_render_job(struct drm_device *dev)
 	if (!exec)
 		return;
+	/* A previous RCL may have written to one of our textures, and
+	 * our full cache flush at bin time may have occurred before
+	 * that RCL completed. Flush the texture cache now, but not
+	 * the instructions or uniforms (since we don't write those
+	 * from an RCL).
+	 */
+	vc4_flush_texture_caches(dev);
 	submit_cl(dev, 1, exec->ct1ca, exec->ct1ea);
 }
...
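The hang-state change replaces the fragile `j + prev_idx` bookkeeping, which could leave gaps or overwrite slots when both exec lists were populated, with a single write cursor `k` that advances once per stored pointer. A small standalone C sketch of the same fix (placeholder data, not the real vc4 structures):

```c
#include <assert.h>
#include <stdio.h>

/* Two variable-length source lists, as with the two exec[] BO lists
 * in vc4_save_hang_state() (values are placeholders). */
static const int list_a[] = { 1, 2, 3 };
static const int list_b[] = { 4, 5 };

int main(void)
{
    int bo[5];
    unsigned int j, k = 0;   /* k is the single write cursor */

    /* First list: k advances exactly once per stored element... */
    for (j = 0; j < 3; j++)
        bo[k++] = list_a[j];

    /* ...and keeps advancing across the second list, so no slot is
     * skipped or overwritten, unlike the old `j + prev_idx` scheme. */
    for (j = 0; j < 2; j++)
        bo[k++] = list_b[j];

    assert(k == 5);          /* mirrors WARN_ON_ONCE(k != state->bo_count) */
    for (j = 0; j < k; j++)
        printf("%d ", bo[j]);
    printf("\n");
    return 0;
}
```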
@@ -1456,8 +1456,7 @@ void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
 	int e = skb_queue_empty(&priv->cm.skb_queue);
-	if (skb_dst(skb))
-		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
+	skb_dst_update_pmtu(skb, mtu);
 	skb_queue_tail(&priv->cm.skb_queue, skb);
 	if (e)
...
@@ -229,6 +229,7 @@ static const struct xpad_device {
 	{ 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
 	{ 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
 	{ 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE },
+	{ 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
 	{ 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 },
 	{ 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE },
 	{ 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 },
@@ -475,6 +476,22 @@ static const u8 xboxone_hori_init[] = {
 	0x00, 0x00, 0x00, 0x80, 0x00
 };
+/*
+ * This packet is required for some of the PDP pads to start
+ * sending input reports. One of those pads is (0x0e6f:0x02ab).
+ */
+static const u8 xboxone_pdp_init1[] = {
+	0x0a, 0x20, 0x00, 0x03, 0x00, 0x01, 0x14
+};
+/*
+ * This packet is required for some of the PDP pads to start
+ * sending input reports. One of those pads is (0x0e6f:0x02ab).
+ */
+static const u8 xboxone_pdp_init2[] = {
+	0x06, 0x20, 0x00, 0x02, 0x01, 0x00
+};
 /*
  * A specific rumble packet is required for some PowerA pads to start
  * sending input reports. One of those pads is (0x24c6:0x543a).
@@ -505,6 +522,8 @@ static const struct xboxone_init_packet xboxone_init_packets[] = {
 	XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init),
 	XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init),
 	XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init),
+	XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init1),
+	XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init2),
 	XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init),
 	XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init),
 	XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init),
...
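The two new PDP entries rely on the init-table semantics: entries are walked in order, a vendor/product of 0x0000 acts as a wildcard, and a device may match several entries, which is how one pad receives xboxone_pdp_init1 followed by xboxone_pdp_init2. A simplified, runnable model of that table walk (hypothetical types; strings stand in for the packet payloads):

```c
#include <stdbool.h>
#include <stdio.h>

/* Simplified model of the xboxone_init_packets table: an entry
 * applies when vendor/product match, with 0x0000 as a wildcard. */
struct init_pkt {
    unsigned short vendor, product;
    const char *name;   /* stands in for the packet payload */
};

static const struct init_pkt packets[] = {
    { 0x0e6f, 0x0165, "hori_init" },
    { 0x0000, 0x0000, "fw2015_init" },   /* wildcard: every pad */
    { 0x0e6f, 0x02ab, "pdp_init1" },
    { 0x0e6f, 0x02ab, "pdp_init2" },     /* same pad, sent in order */
};

static bool pkt_matches(const struct init_pkt *p,
                        unsigned short vid, unsigned short pid)
{
    return (p->vendor == 0 || p->vendor == vid) &&
           (p->product == 0 || p->product == pid);
}

int main(void)
{
    unsigned short vid = 0x0e6f, pid = 0x02ab;  /* the new PDP pad */

    /* Walk the table in order, sending every matching packet, so a
     * device can require several packets (pdp_init1 then pdp_init2). */
    for (unsigned int i = 0; i < sizeof(packets) / sizeof(packets[0]); i++)
        if (pkt_matches(&packets[i], vid, pid))
            printf("send %s\n", packets[i].name);
    return 0;
}
```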
@@ -21,10 +21,16 @@
 #define TP_COMMAND		0xE2	/* Commands start with this */
 #define TP_READ_ID		0xE1	/* Sent for device identification */
-#define TP_MAGIC_IDENT		0x03	/* Sent after a TP_READ_ID followed */
-					/* by the firmware ID */
-					/* Firmware ID includes 0x1, 0x2, 0x3 */
+/*
+ * Valid first byte responses to the "Read Secondary ID" (0xE1) command.
+ * 0x01 was the original IBM trackpoint; the others implement a very
+ * limited subset of trackpoint features.
+ */
+#define TP_VARIANT_IBM		0x01
+#define TP_VARIANT_ALPS		0x02
+#define TP_VARIANT_ELAN		0x03
+#define TP_VARIANT_NXP		0x04
 /*
  * Commands
@@ -136,18 +142,20 @@
 #define MAKE_PS2_CMD(params, results, cmd) ((params<<12) | (results<<8) | (cmd))
-struct trackpoint_data
-{
-	unsigned char sensitivity, speed, inertia, reach;
-	unsigned char draghys, mindrag;
-	unsigned char thresh, upthresh;
-	unsigned char ztime, jenks;
-	unsigned char drift_time;
+struct trackpoint_data {
+	u8 variant_id;
+	u8 firmware_id;
+	u8 sensitivity, speed, inertia, reach;
+	u8 draghys, mindrag;
+	u8 thresh, upthresh;
+	u8 ztime, jenks;
+	u8 drift_time;
 	/* toggles */
-	unsigned char press_to_select;
-	unsigned char skipback;
-	unsigned char ext_dev;
+	bool press_to_select;
+	bool skipback;
+	bool ext_dev;
 };
 #ifdef CONFIG_MOUSE_PS2_TRACKPOINT
...
@@ -570,14 +570,19 @@ static int rmi_f01_probe(struct rmi_function *fn)
 	dev_set_drvdata(&fn->dev, f01);
-	error = devm_device_add_group(&fn->rmi_dev->dev, &rmi_f01_attr_group);
+	error = sysfs_create_group(&fn->rmi_dev->dev.kobj, &rmi_f01_attr_group);
 	if (error)
-		dev_warn(&fn->dev,
-			 "Failed to create attribute group: %d\n", error);
+		dev_warn(&fn->dev, "Failed to create sysfs group: %d\n", error);
 	return 0;
 }
+static void rmi_f01_remove(struct rmi_function *fn)
+{
+	/* Note that the bus device is used, not the F01 device */
+	sysfs_remove_group(&fn->rmi_dev->dev.kobj, &rmi_f01_attr_group);
+}
 static int rmi_f01_config(struct rmi_function *fn)
 {
 	struct f01_data *f01 = dev_get_drvdata(&fn->dev);
@@ -717,6 +722,7 @@ struct rmi_function_handler rmi_f01_handler = {
 	},
 	.func = 0x01,
 	.probe = rmi_f01_probe,
+	.remove = rmi_f01_remove,
 	.config = rmi_f01_config,
 	.attention = rmi_f01_attention,
 	.suspend = rmi_f01_suspend,
...
-/*
- * Copyright (c) 2017 Samsung Electronics Co., Ltd.
- * Author: Andi Shyti <andi.shyti@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Samsung S6SY761 Touchscreen device driver
- */
+// SPDX-License-Identifier: GPL-2.0
+// Samsung S6SY761 Touchscreen device driver
+//
+// Copyright (c) 2017 Samsung Electronics Co., Ltd.
+// Copyright (c) 2017 Andi Shyti <andi.shyti@samsung.com>
 #include <asm/unaligned.h>
 #include <linux/delay.h>
...
-/*
- * Copyright (c) 2017 Samsung Electronics Co., Ltd.
- * Author: Andi Shyti <andi.shyti@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * STMicroelectronics FTS Touchscreen device driver
- */
+// SPDX-License-Identifier: GPL-2.0
+// STMicroelectronics FTS Touchscreen device driver
+//
+// Copyright (c) 2017 Samsung Electronics Co., Ltd.
+// Copyright (c) 2017 Andi Shyti <andi.shyti@samsung.com>
 #include <linux/delay.h>
 #include <linux/i2c.h>
...
@@ -2235,19 +2235,14 @@ static bool rtl8169_do_counters(struct net_device *dev, u32 counter_cmd)
 	void __iomem *ioaddr = tp->mmio_addr;
 	dma_addr_t paddr = tp->counters_phys_addr;
 	u32 cmd;
-	bool ret;
 	RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
+	RTL_R32(CounterAddrHigh);
 	cmd = (u64)paddr & DMA_BIT_MASK(32);
 	RTL_W32(CounterAddrLow, cmd);
 	RTL_W32(CounterAddrLow, cmd | counter_cmd);
-	ret = rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
-	RTL_W32(CounterAddrLow, 0);
-	RTL_W32(CounterAddrHigh, 0);
-	return ret;
+	return rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
 }
 static bool rtl8169_reset_counters(struct net_device *dev)
...
@@ -829,7 +829,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
 		int mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr) -
 			  GENEVE_BASE_HLEN - info->options_len - 14;
-		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
+		skb_dst_update_pmtu(skb, mtu);
 	}
 	sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
@@ -875,7 +875,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
 		int mtu = dst_mtu(dst) - sizeof(struct ipv6hdr) -
 			  GENEVE_BASE_HLEN - info->options_len - 14;
-		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
+		skb_dst_update_pmtu(skb, mtu);
 	}
 	sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
...
@@ -673,8 +673,9 @@ static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
 					  struct sock *sk,
 					  struct sk_buff *skb)
 {
-	/* don't divert multicast */
-	if (ipv4_is_multicast(ip_hdr(skb)->daddr))
+	/* don't divert multicast or local broadcast */
+	if (ipv4_is_multicast(ip_hdr(skb)->daddr) ||
+	    ipv4_is_lbcast(ip_hdr(skb)->daddr))
 		return skb;
 	if (qdisc_tx_is_default(vrf_dev))
...
@@ -2158,8 +2158,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
 		if (skb_dst(skb)) {
 			int mtu = dst_mtu(ndst) - VXLAN_HEADROOM;
-			skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL,
-						       skb, mtu);
+			skb_dst_update_pmtu(skb, mtu);
 		}
 		tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
@@ -2200,8 +2199,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
 		if (skb_dst(skb)) {
 			int mtu = dst_mtu(ndst) - VXLAN6_HEADROOM;
-			skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL,
-						       skb, mtu);
+			skb_dst_update_pmtu(skb, mtu);
 		}
 		tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
...
@@ -1633,28 +1633,18 @@ void btrfs_readdir_put_delayed_items(struct inode *inode,
 int btrfs_should_delete_dir_index(struct list_head *del_list,
 				  u64 index)
 {
-	struct btrfs_delayed_item *curr, *next;
-	int ret;
-	if (list_empty(del_list))
-		return 0;
+	struct btrfs_delayed_item *curr;
+	int ret = 0;
-	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
+	list_for_each_entry(curr, del_list, readdir_list) {
 		if (curr->key.offset > index)
 			break;
-		list_del(&curr->readdir_list);
-		ret = (curr->key.offset == index);
-		if (refcount_dec_and_test(&curr->refs))
-			kfree(curr);
-		if (ret)
-			return 1;
-		else
-			continue;
+		if (curr->key.offset == index) {
+			ret = 1;
+			break;
+		}
 	}
-	return 0;
+	return ret;
 }
 /*
...
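The rewritten btrfs_should_delete_dir_index() turns a destructive walk, which unlinked and freed entries as it scanned, into a read-only lookup over a list sorted by key offset, breaking out as soon as the target can no longer appear. A standalone sketch of that sorted early-exit scan (plain pointers instead of the kernel list API):

```c
#include <stdbool.h>
#include <stdio.h>

/* Simplified model of the rewritten lookup: walk a list sorted by
 * offset, stop at the first entry past the target, and never modify
 * or free entries while scanning. */
struct item {
    unsigned long offset;
    struct item *next;
};

static bool should_delete_index(const struct item *head, unsigned long index)
{
    for (const struct item *curr = head; curr; curr = curr->next) {
        if (curr->offset > index)
            break;              /* sorted: nothing later can match */
        if (curr->offset == index)
            return true;
    }
    return false;
}

int main(void)
{
    struct item c = { 9, NULL }, b = { 5, &c }, a = { 2, &b };

    printf("%d %d\n", should_delete_index(&a, 5),   /* 1: found */
                      should_delete_index(&a, 7));  /* 0: passed it */
    return 0;
}
```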
@@ -452,7 +452,7 @@ ssize_t orangefs_inode_read(struct inode *inode,
 static ssize_t orangefs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
 {
 	struct file *file = iocb->ki_filp;
-	loff_t pos = *(&iocb->ki_pos);
+	loff_t pos = iocb->ki_pos;
 	ssize_t rc = 0;
 	BUG_ON(iocb->private);
@@ -492,9 +492,6 @@ static ssize_t orangefs_file_write_iter(struct kiocb *iocb, struct iov_iter *ite
 		}
 	}
-	if (file->f_pos > i_size_read(file->f_mapping->host))
-		orangefs_i_size_write(file->f_mapping->host, file->f_pos);
 	rc = generic_write_checks(iocb, iter);
 	if (rc <= 0) {
@@ -508,7 +505,7 @@ static ssize_t orangefs_file_write_iter(struct kiocb *iocb, struct iov_iter *ite
 	 * pos to the end of the file, so we will wait till now to set
 	 * pos...
 	 */
-	pos = *(&iocb->ki_pos);
+	pos = iocb->ki_pos;
 	rc = do_readv_writev(ORANGEFS_IO_WRITE,
 			     file,
...
@@ -533,17 +533,6 @@ do { \
 	sys_attr.mask = ORANGEFS_ATTR_SYS_ALL_SETABLE; \
 } while (0)
-static inline void orangefs_i_size_write(struct inode *inode, loff_t i_size)
-{
-#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
-	inode_lock(inode);
-#endif
-	i_size_write(inode, i_size);
-#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
-	inode_unlock(inode);
-#endif
-}
 static inline void orangefs_set_timeout(struct dentry *dentry)
 {
 	unsigned long time = jiffies + orangefs_dcache_timeout_msecs*HZ/1000;
...
@@ -504,4 +504,12 @@ static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
 }
 #endif
+static inline void skb_dst_update_pmtu(struct sk_buff *skb, u32 mtu)
+{
+	struct dst_entry *dst = skb_dst(skb);
+	if (dst && dst->ops->update_pmtu)
+		dst->ops->update_pmtu(dst, NULL, skb, mtu);
+}
 #endif /* _NET_DST_H */
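skb_dst_update_pmtu() is the helper the tunnel drivers elsewhere in this commit are converted to: it folds the `dst` NULL check, previously open-coded (or forgotten) at each call site, together with the check for a present `update_pmtu` hook. A simplified userspace model of that checked-indirect-call pattern (hypothetical struct, not the kernel's dst_ops):

```c
#include <stddef.h>
#include <stdio.h>

/* Simplified model: both the dst and its update_pmtu hook may be
 * absent, so the wrapper checks before the indirect call instead of
 * every caller open-coding (and sometimes forgetting) the checks. */
struct dst {
    void (*update_pmtu)(struct dst *dst, unsigned int mtu);
};

static void dst_update_pmtu_checked(struct dst *dst, unsigned int mtu)
{
    if (dst && dst->update_pmtu)
        dst->update_pmtu(dst, mtu);
}

static void print_pmtu(struct dst *dst, unsigned int mtu)
{
    (void)dst;
    printf("pmtu -> %u\n", mtu);
}

int main(void)
{
    struct dst with_hook = { .update_pmtu = print_pmtu };
    struct dst without_hook = { .update_pmtu = NULL };

    dst_update_pmtu_checked(&with_hook, 1400);    /* calls through */
    dst_update_pmtu_checked(&without_hook, 1400); /* safe no-op */
    dst_update_pmtu_checked(NULL, 1400);          /* safe no-op */
    return 0;
}
```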
@@ -223,6 +223,11 @@ int net_eq(const struct net *net1, const struct net *net2)
 	return net1 == net2;
 }
+static inline int check_net(const struct net *net)
+{
+	return refcount_read(&net->count) != 0;
+}
 void net_drop_ns(void *);
 #else
@@ -247,6 +252,11 @@ int net_eq(const struct net *net1, const struct net *net2)
 	return 1;
 }
+static inline int check_net(const struct net *net)
+{
+	return 1;
+}
 #define net_drop_ns NULL
 #endif
...
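check_net() reduces namespace liveness to "the refcount is still nonzero"; kernel sockets do not hold a reference on their namespace, so the TCP changes below poll this instead of assuming the netns is alive. A tiny standalone model of the idea (a plain int stands in for refcount_t):

```c
#include <stdio.h>

/* Simplified model: a namespace is "alive" while its refcount is
 * nonzero. Kernel sockets don't pin the namespace, so they must ask
 * before acting rather than assume it is still there. */
struct net { int count; };

static int check_net(const struct net *net)
{
    return net->count != 0;
}

int main(void)
{
    struct net ns = { .count = 2 };

    printf("alive: %d\n", check_net(&ns));  /* 1 */
    ns.count = 0;                           /* last user gone */
    printf("alive: %d\n", check_net(&ns));  /* 0: close, don't send */
    return 0;
}
```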
@@ -140,6 +140,9 @@ static void ccid2_hc_tx_rto_expire(struct timer_list *t)
 	ccid2_pr_debug("RTO_EXPIRE\n");
+	if (sk->sk_state == DCCP_CLOSED)
+		goto out;
 	/* back-off timer */
 	hc->tx_rto <<= 1;
 	if (hc->tx_rto > DCCP_RTO_MAX)
...
@@ -520,8 +520,7 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
 	else
 		mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
-	if (skb_dst(skb))
-		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
+	skb_dst_update_pmtu(skb, mtu);
 	if (skb->protocol == htons(ETH_P_IP)) {
 		if (!skb_is_gso(skb) &&
...
@@ -200,7 +200,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
 	mtu = dst_mtu(dst);
 	if (skb->len > mtu) {
-		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
+		skb_dst_update_pmtu(skb, mtu);
 		if (skb->protocol == htons(ETH_P_IP)) {
 			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
 				  htonl(mtu));
...
@@ -2322,6 +2322,9 @@ void tcp_close(struct sock *sk, long timeout)
 			tcp_send_active_reset(sk, GFP_ATOMIC);
 			__NET_INC_STATS(sock_net(sk),
 					LINUX_MIB_TCPABORTONMEMORY);
+		} else if (!check_net(sock_net(sk))) {
+			/* Not possible to send reset; just close */
+			tcp_set_state(sk, TCP_CLOSE);
 		}
 	}
...
@@ -48,11 +48,19 @@ static void tcp_write_err(struct sock *sk)
  *  to prevent DoS attacks. It is called when a retransmission timeout
  *  or zero probe timeout occurs on orphaned socket.
  *
+ *  Also close if our net namespace is exiting; in that case there is no
+ *  hope of ever communicating again since all netns interfaces are already
+ *  down (or about to be down), and we need to release our dst references,
+ *  which have been moved to the netns loopback interface, so the namespace
+ *  can finish exiting.  This condition is only possible if we are a kernel
+ *  socket, as those do not hold references to the namespace.
+ *
  *  Criteria is still not confirmed experimentally and may change.
  *  We kill the socket, if:
  *  1. If number of orphaned sockets exceeds an administratively configured
  *     limit.
  *  2. If we have strong memory pressure.
+ *  3. If our net namespace is exiting.
  */
 static int tcp_out_of_resources(struct sock *sk, bool do_reset)
 {
@@ -81,6 +89,13 @@ static int tcp_out_of_resources(struct sock *sk, bool do_reset)
 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
 		return 1;
 	}
+	if (!check_net(sock_net(sk))) {
+		/* Not possible to send reset; just close */
+		tcp_done(sk);
+		return 1;
+	}
 	return 0;
 }
...
@@ -642,8 +642,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 		if (rel_info > dst_mtu(skb_dst(skb2)))
 			goto out;
-		skb_dst(skb2)->ops->update_pmtu(skb_dst(skb2), NULL, skb2,
-						rel_info);
+		skb_dst_update_pmtu(skb2, rel_info);
 	}
 	icmp_send(skb2, rel_type, rel_code, htonl(rel_info));
@@ -1134,8 +1133,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
 			mtu = 576;
 	}
-	if (skb_dst(skb) && !t->parms.collect_md)
-		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
+	skb_dst_update_pmtu(skb, mtu);
 	if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {
 		*pmtu = mtu;
 		err = -EMSGSIZE;
...
@@ -483,7 +483,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
 	mtu = dst_mtu(dst);
 	if (!skb->ignore_df && skb->len > mtu) {
-		skb_dst(skb)->ops->update_pmtu(dst, NULL, skb, mtu);
+		skb_dst_update_pmtu(skb, mtu);
 		if (skb->protocol == htons(ETH_P_IPV6)) {
 			if (mtu < IPV6_MIN_MTU)
...
@@ -934,8 +934,8 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
 			df = 0;
 		}
-	if (tunnel->parms.iph.daddr && skb_dst(skb))
-		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
+	if (tunnel->parms.iph.daddr)
+		skb_dst_update_pmtu(skb, mtu);
 	if (skb->len > mtu && !skb_is_gso(skb)) {
 		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
...
@@ -951,7 +951,7 @@ static unsigned int vsock_poll(struct file *file, struct socket *sock,
 		 * POLLOUT|POLLWRNORM when peer is closed and nothing to read,
 		 * but local send is not shutdown.
 		 */
-		if (sk->sk_state == TCP_CLOSE) {
+		if (sk->sk_state == TCP_CLOSE || sk->sk_state == TCP_CLOSING) {
 			if (!(sk->sk_shutdown & SEND_SHUTDOWN))
 				mask |= POLLOUT | POLLWRNORM;
...