Commit 0e3d5b21 authored by Vinod Koul

Merge branch 'topic/ioatdma' into for-linus

parents 8795d143 c997e30e
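This merge brings in the ioatdma descriptor-allocation rework (one file's diff, presumably dma.c with the ring-allocation changes themselves, is collapsed and not shown below). The visible hunks make three related changes: the legacy pci_pool_* calls are converted to the underlying dma_pool API and the per-descriptor "dma_desc_pool" is dropped entirely; descriptors instead come from contiguous 2 MB DMA-coherent chunks tracked per channel via the new struct ioat_descs; and, with the ring now allocated at full size up front (order = IOAT_MAX_ORDER), the ring-resize machinery (the ioat_ring_alloc_order and ioat_ring_max_alloc_order module parameters and the IOAT_RESHAPE_PENDING flag) goes away. Shadow copies of MSI-X registers are also added for a CB3.3 channel-reset errata workaround.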
This diff is collapsed.
drivers/dma/ioat/dma.h

@@ -62,7 +62,6 @@ enum ioat_irq_mode {
  * struct ioatdma_device - internal representation of a IOAT device
  * @pdev: PCI-Express device
  * @reg_base: MMIO register space base address
- * @dma_pool: for allocating DMA descriptors
  * @completion_pool: DMA buffers for completion ops
  * @sed_hw_pool: DMA super descriptor pools
  * @dma_dev: embedded struct dma_device
@@ -76,8 +75,7 @@ enum ioat_irq_mode {
 struct ioatdma_device {
 	struct pci_dev *pdev;
 	void __iomem *reg_base;
-	struct pci_pool *dma_pool;
-	struct pci_pool *completion_pool;
+	struct dma_pool *completion_pool;
 #define MAX_SED_POOLS	5
 	struct dma_pool *sed_hw_pool[MAX_SED_POOLS];
 	struct dma_device dma_dev;
@@ -88,6 +86,16 @@ struct ioatdma_device {
 	struct dca_provider *dca;
 	enum ioat_irq_mode irq_mode;
 	u32 cap;
+
+	/* shadow version for CB3.3 chan reset errata workaround */
+	u64 msixtba0;
+	u64 msixdata0;
+	u32 msixpba;
+};
+
+struct ioat_descs {
+	void *virt;
+	dma_addr_t hw;
 };
 
 struct ioatdma_chan {
@@ -100,7 +108,6 @@ struct ioatdma_chan {
 	#define IOAT_COMPLETION_ACK 1
 	#define IOAT_RESET_PENDING 2
 	#define IOAT_KOBJ_INIT_FAIL 3
-	#define IOAT_RESHAPE_PENDING 4
 	#define IOAT_RUN 5
 	#define IOAT_CHAN_ACTIVE 6
 	struct timer_list timer;
@@ -133,6 +140,8 @@ struct ioatdma_chan {
 	u16 produce;
 	struct ioat_ring_ent **ring;
 	spinlock_t prep_lock;
+	struct ioat_descs descs[2];
+	int desc_chunks;
 };
 
 struct ioat_sysfs_entry {
@@ -302,10 +311,8 @@ static inline bool is_ioat_bug(unsigned long err)
 }
 
 #define IOAT_MAX_ORDER 16
-#define ioat_get_alloc_order() \
-		(min(ioat_ring_alloc_order, IOAT_MAX_ORDER))
-#define ioat_get_max_alloc_order() \
-		(min(ioat_ring_max_alloc_order, IOAT_MAX_ORDER))
+#define IOAT_MAX_DESCS 65536
+#define IOAT_DESCS_PER_2M 32768
 
 static inline u32 ioat_ring_size(struct ioatdma_chan *ioat_chan)
 {
...
drivers/dma/ioat/hw.h

@@ -73,6 +73,8 @@
 
 int system_has_dca_enabled(struct pci_dev *pdev);
 
+#define IOAT_DESC_SZ	64
+
 struct ioat_dma_descriptor {
 	uint32_t	size;
 	union {
...
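The new constants agree with one another and with the two-entry descs[] array added to struct ioatdma_chan above: a 2 MB chunk holds SZ_2M / IOAT_DESC_SZ = 32768 descriptors, so the IOAT_MAX_DESCS ceiling of 65536 (one full order-16 ring) fits in exactly two chunks. A standalone compile-time check of that arithmetic (illustrative only, not part of the diff):

/* Illustrative sanity check of the constants introduced above. */
#define SZ_2M			(2 * 1024 * 1024)	/* from <linux/sizes.h> */
#define IOAT_DESC_SZ		64
#define IOAT_MAX_ORDER		16
#define IOAT_MAX_DESCS		65536
#define IOAT_DESCS_PER_2M	32768

_Static_assert(IOAT_DESCS_PER_2M == SZ_2M / IOAT_DESC_SZ,
	       "one 2 MB chunk holds 32768 64-byte descriptors");
_Static_assert(IOAT_MAX_DESCS == 1 << IOAT_MAX_ORDER,
	       "the descriptor ceiling is one full order-16 ring");
_Static_assert(IOAT_MAX_DESCS / IOAT_DESCS_PER_2M == 2,
	       "a maximal ring needs exactly the two descs[] chunks");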
drivers/dma/ioat/init.c

@@ -28,6 +28,7 @@
 #include <linux/prefetch.h>
 #include <linux/dca.h>
 #include <linux/aer.h>
+#include <linux/sizes.h>
 #include "dma.h"
 #include "registers.h"
 #include "hw.h"
@@ -136,14 +137,6 @@ int ioat_pending_level = 4;
 module_param(ioat_pending_level, int, 0644);
 MODULE_PARM_DESC(ioat_pending_level,
 		 "high-water mark for pushing ioat descriptors (default: 4)");
-int ioat_ring_alloc_order = 8;
-module_param(ioat_ring_alloc_order, int, 0644);
-MODULE_PARM_DESC(ioat_ring_alloc_order,
-		 "ioat+: allocate 2^n descriptors per channel (default: 8 max: 16)");
-int ioat_ring_max_alloc_order = IOAT_MAX_ORDER;
-module_param(ioat_ring_max_alloc_order, int, 0644);
-MODULE_PARM_DESC(ioat_ring_max_alloc_order,
-		 "ioat+: upper limit for ring size (default: 16)");
 
 static char ioat_interrupt_style[32] = "msix";
 module_param_string(ioat_interrupt_style, ioat_interrupt_style,
 		    sizeof(ioat_interrupt_style), 0644);
@@ -504,23 +497,14 @@ static int ioat_probe(struct ioatdma_device *ioat_dma)
 	struct pci_dev *pdev = ioat_dma->pdev;
 	struct device *dev = &pdev->dev;
 
-	/* DMA coherent memory pool for DMA descriptor allocations */
-	ioat_dma->dma_pool = pci_pool_create("dma_desc_pool", pdev,
-					     sizeof(struct ioat_dma_descriptor),
-					     64, 0);
-	if (!ioat_dma->dma_pool) {
-		err = -ENOMEM;
-		goto err_dma_pool;
-	}
-
-	ioat_dma->completion_pool = pci_pool_create("completion_pool", pdev,
-						    sizeof(u64),
-						    SMP_CACHE_BYTES,
-						    SMP_CACHE_BYTES);
+	ioat_dma->completion_pool = dma_pool_create("completion_pool", dev,
+						    sizeof(u64),
+						    SMP_CACHE_BYTES,
+						    SMP_CACHE_BYTES);
 	if (!ioat_dma->completion_pool) {
 		err = -ENOMEM;
-		goto err_completion_pool;
+		goto err_out;
 	}
 
 	ioat_enumerate_channels(ioat_dma);
@@ -546,10 +530,8 @@ static int ioat_probe(struct ioatdma_device *ioat_dma)
 err_self_test:
 	ioat_disable_interrupts(ioat_dma);
 err_setup_interrupts:
-	pci_pool_destroy(ioat_dma->completion_pool);
-err_completion_pool:
-	pci_pool_destroy(ioat_dma->dma_pool);
-err_dma_pool:
+	dma_pool_destroy(ioat_dma->completion_pool);
+err_out:
 	return err;
 }
@@ -559,8 +541,7 @@ static int ioat_register(struct ioatdma_device *ioat_dma)
 	if (err) {
 		ioat_disable_interrupts(ioat_dma);
-		pci_pool_destroy(ioat_dma->completion_pool);
-		pci_pool_destroy(ioat_dma->dma_pool);
+		dma_pool_destroy(ioat_dma->completion_pool);
 	}
 
 	return err;
@@ -576,8 +557,7 @@ static void ioat_dma_remove(struct ioatdma_device *ioat_dma)
 	dma_async_device_unregister(dma);
 
-	pci_pool_destroy(ioat_dma->dma_pool);
-	pci_pool_destroy(ioat_dma->completion_pool);
+	dma_pool_destroy(ioat_dma->completion_pool);
 
 	INIT_LIST_HEAD(&dma->channels);
 }
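A note on the conversion pattern in the ioat_probe/ioat_register/ioat_dma_remove hunks above: the pci_pool_* calls were thin wrappers in <linux/pci.h> that simply forwarded to the dma_pool API with &pdev->dev, so each call site converts mechanically; only the function name and the device argument change. For example, from the probe path:

/* before: the pci_pool wrapper, keyed on the PCI device */
ioat_dma->completion_pool = pci_pool_create("completion_pool", pdev,
					    sizeof(u64), SMP_CACHE_BYTES,
					    SMP_CACHE_BYTES);

/* after: the underlying dma_pool API, keyed on the generic struct device */
ioat_dma->completion_pool = dma_pool_create("completion_pool", &pdev->dev,
					    sizeof(u64), SMP_CACHE_BYTES,
					    SMP_CACHE_BYTES);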
@@ -666,10 +646,19 @@ static void ioat_free_chan_resources(struct dma_chan *c)
 		ioat_free_ring_ent(desc, c);
 	}
 
+	for (i = 0; i < ioat_chan->desc_chunks; i++) {
+		dma_free_coherent(to_dev(ioat_chan), SZ_2M,
+				  ioat_chan->descs[i].virt,
+				  ioat_chan->descs[i].hw);
+		ioat_chan->descs[i].virt = NULL;
+		ioat_chan->descs[i].hw = 0;
+	}
+	ioat_chan->desc_chunks = 0;
+
 	kfree(ioat_chan->ring);
 	ioat_chan->ring = NULL;
 	ioat_chan->alloc_order = 0;
-	pci_pool_free(ioat_dma->completion_pool, ioat_chan->completion,
+	dma_pool_free(ioat_dma->completion_pool, ioat_chan->completion,
 		      ioat_chan->completion_dma);
 	spin_unlock_bh(&ioat_chan->prep_lock);
 	spin_unlock_bh(&ioat_chan->cleanup_lock);
@@ -701,7 +690,7 @@ static int ioat_alloc_chan_resources(struct dma_chan *c)
 	/* allocate a completion writeback area */
 	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
 	ioat_chan->completion =
-		pci_pool_alloc(ioat_chan->ioat_dma->completion_pool,
+		dma_pool_alloc(ioat_chan->ioat_dma->completion_pool,
 			       GFP_KERNEL, &ioat_chan->completion_dma);
 	if (!ioat_chan->completion)
 		return -ENOMEM;
@@ -712,7 +701,7 @@ static int ioat_alloc_chan_resources(struct dma_chan *c)
 	writel(((u64)ioat_chan->completion_dma) >> 32,
 	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
 
-	order = ioat_get_alloc_order();
+	order = IOAT_MAX_ORDER;
 	ring = ioat_alloc_ring(c, order, GFP_KERNEL);
 	if (!ring)
 		return -ENOMEM;
...
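The collapsed part of this diff holds the allocation side of the new scheme. Below is a minimal sketch of how the 2 MB chunks are presumably populated, inferred from the descs[2]/desc_chunks fields and the dma_free_coherent() unwind shown above; the helper name ioat_alloc_desc_chunks and the exact control flow are assumptions, not the merged code.

/* Hypothetical sketch: back a ring of total_descs descriptors with
 * 2 MB DMA-coherent chunks (IOAT_DESCS_PER_2M descriptors per chunk). */
static int ioat_alloc_desc_chunks(struct ioatdma_chan *ioat_chan,
				  int total_descs, gfp_t flags)
{
	int i;

	for (i = 0; i * IOAT_DESCS_PER_2M < total_descs; i++) {
		struct ioat_descs *descs = &ioat_chan->descs[i];

		descs->virt = dma_alloc_coherent(to_dev(ioat_chan), SZ_2M,
						 &descs->hw, flags);
		if (!descs->virt) {
			/* unwind any chunks already allocated */
			while (--i >= 0)
				dma_free_coherent(to_dev(ioat_chan), SZ_2M,
						  ioat_chan->descs[i].virt,
						  ioat_chan->descs[i].hw);
			return -ENOMEM;
		}
		ioat_chan->desc_chunks++;
	}

	/* descriptor n then lives IOAT_DESC_SZ-aligned inside a chunk:
	 * descs[n / IOAT_DESCS_PER_2M], offset (n % IOAT_DESCS_PER_2M) * IOAT_DESC_SZ
	 */
	return 0;
}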