Commit 742fb20f authored by Grygorii Strashko's avatar Grygorii Strashko Committed by David S. Miller

net: ethernet: ti: cpdma: switch to use genalloc

TI CPDMA currently uses a bitmap for tracking descriptor allocations,
but genalloc already handles the same task and can be used both with
special memory (SRAM) and with DMA coherent memory chunks
(dma_alloc_coherent()). Hence, switch to using genalloc and add a
desc_num property to each channel to limit the maximum number of
allowed descriptors per CPDMA channel. This patch does not affect
net throughput.
Acked-by: Mugunthan V N <mugunthanvnm@ti.com>
Tested-by: Ivan Khoronzhuk <ivan.khoronzhuk@linaro.org>
Signed-off-by: Grygorii Strashko <grygorii.strashko@ti.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent c37d4a00
...@@ -21,7 +21,7 @@ ...@@ -21,7 +21,7 @@
#include <linux/dma-mapping.h> #include <linux/dma-mapping.h>
#include <linux/io.h> #include <linux/io.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/genalloc.h>
#include "davinci_cpdma.h" #include "davinci_cpdma.h"
/* DMA Registers */ /* DMA Registers */
...@@ -87,9 +87,8 @@ struct cpdma_desc_pool { ...@@ -87,9 +87,8 @@ struct cpdma_desc_pool {
void *cpumap; /* dma_alloc map */ void *cpumap; /* dma_alloc map */
int desc_size, mem_size; int desc_size, mem_size;
int num_desc, used_desc; int num_desc, used_desc;
unsigned long *bitmap;
struct device *dev; struct device *dev;
spinlock_t lock; struct gen_pool *gen_pool;
}; };
enum cpdma_state { enum cpdma_state {
...@@ -117,6 +116,7 @@ struct cpdma_chan { ...@@ -117,6 +116,7 @@ struct cpdma_chan {
int chan_num; int chan_num;
spinlock_t lock; spinlock_t lock;
int count; int count;
u32 desc_num;
u32 mask; u32 mask;
cpdma_handler_fn handler; cpdma_handler_fn handler;
enum dma_data_direction dir; enum dma_data_direction dir;
...@@ -145,6 +145,19 @@ struct cpdma_chan { ...@@ -145,6 +145,19 @@ struct cpdma_chan {
(directed << CPDMA_TO_PORT_SHIFT)); \ (directed << CPDMA_TO_PORT_SHIFT)); \
} while (0) } while (0)
/*
 * cpdma_desc_pool_destroy - release the backing memory of a descriptor pool
 * @pool: pool to tear down; NULL is tolerated as a no-op
 *
 * Warns if any descriptors are still outstanding (pool->used_desc != 0),
 * then releases the backing storage: DMA-coherent memory when the pool was
 * allocated with dma_alloc_coherent() (pool->cpumap set), otherwise unmaps
 * the remapped region (presumably dedicated on-chip SRAM — see the pool
 * creation path for how iomap is obtained).
 */
static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
{
	if (!pool)
		return;

	/* every allocated descriptor should have been freed back by now */
	WARN_ON(pool->used_desc);
	if (pool->cpumap)
		dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
				  pool->phys);
	else
		iounmap(pool->iomap);
}
/* /*
* Utility constructs for a cpdma descriptor pool. Some devices (e.g. davinci * Utility constructs for a cpdma descriptor pool. Some devices (e.g. davinci
* emac) have dedicated on-chip memory for these descriptors. Some other * emac) have dedicated on-chip memory for these descriptors. Some other
...@@ -155,24 +168,25 @@ static struct cpdma_desc_pool * ...@@ -155,24 +168,25 @@ static struct cpdma_desc_pool *
cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr, cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr,
int size, int align) int size, int align)
{ {
int bitmap_size;
struct cpdma_desc_pool *pool; struct cpdma_desc_pool *pool;
int ret;
pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL); pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL);
if (!pool) if (!pool)
goto fail; goto gen_pool_create_fail;
spin_lock_init(&pool->lock);
pool->dev = dev; pool->dev = dev;
pool->mem_size = size; pool->mem_size = size;
pool->desc_size = ALIGN(sizeof(struct cpdma_desc), align); pool->desc_size = ALIGN(sizeof(struct cpdma_desc), align);
pool->num_desc = size / pool->desc_size; pool->num_desc = size / pool->desc_size;
bitmap_size = (pool->num_desc / BITS_PER_LONG) * sizeof(long); pool->gen_pool = devm_gen_pool_create(dev, ilog2(pool->desc_size), -1,
pool->bitmap = devm_kzalloc(dev, bitmap_size, GFP_KERNEL); "cpdma");
if (!pool->bitmap) if (IS_ERR(pool->gen_pool)) {
goto fail; dev_err(dev, "pool create failed %ld\n",
PTR_ERR(pool->gen_pool));
goto gen_pool_create_fail;
}
if (phys) { if (phys) {
pool->phys = phys; pool->phys = phys;
...@@ -185,24 +199,22 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr, ...@@ -185,24 +199,22 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr,
pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */ pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */
} }
if (pool->iomap) if (!pool->iomap)
return pool; goto gen_pool_create_fail;
fail:
return NULL;
}
static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool) ret = gen_pool_add_virt(pool->gen_pool, (unsigned long)pool->iomap,
{ pool->phys, pool->mem_size, -1);
if (!pool) if (ret < 0) {
return; dev_err(dev, "pool add failed %d\n", ret);
goto gen_pool_add_virt_fail;
WARN_ON(pool->used_desc);
if (pool->cpumap) {
dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
pool->phys);
} else {
iounmap(pool->iomap);
} }
return pool;
gen_pool_add_virt_fail:
cpdma_desc_pool_destroy(pool);
gen_pool_create_fail:
return NULL;
} }
static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool, static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
...@@ -220,47 +232,23 @@ desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma) ...@@ -220,47 +232,23 @@ desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
} }
static struct cpdma_desc __iomem * static struct cpdma_desc __iomem *
cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc, bool is_rx) cpdma_desc_alloc(struct cpdma_desc_pool *pool)
{ {
unsigned long flags;
int index;
int desc_start;
int desc_end;
struct cpdma_desc __iomem *desc = NULL; struct cpdma_desc __iomem *desc = NULL;
spin_lock_irqsave(&pool->lock, flags); desc = (struct cpdma_desc __iomem *)gen_pool_alloc(pool->gen_pool,
pool->desc_size);
if (is_rx) { if (desc)
desc_start = 0;
desc_end = pool->num_desc/2;
} else {
desc_start = pool->num_desc/2;
desc_end = pool->num_desc;
}
index = bitmap_find_next_zero_area(pool->bitmap,
desc_end, desc_start, num_desc, 0);
if (index < desc_end) {
bitmap_set(pool->bitmap, index, num_desc);
desc = pool->iomap + pool->desc_size * index;
pool->used_desc++; pool->used_desc++;
}
spin_unlock_irqrestore(&pool->lock, flags);
return desc; return desc;
} }
static void cpdma_desc_free(struct cpdma_desc_pool *pool, static void cpdma_desc_free(struct cpdma_desc_pool *pool,
struct cpdma_desc __iomem *desc, int num_desc) struct cpdma_desc __iomem *desc, int num_desc)
{ {
unsigned long flags, index; gen_pool_free(pool->gen_pool, (unsigned long)desc, pool->desc_size);
index = ((unsigned long)desc - (unsigned long)pool->iomap) /
pool->desc_size;
spin_lock_irqsave(&pool->lock, flags);
bitmap_clear(pool->bitmap, index, num_desc);
pool->used_desc--; pool->used_desc--;
spin_unlock_irqrestore(&pool->lock, flags);
} }
struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params) struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
...@@ -516,6 +504,7 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num, ...@@ -516,6 +504,7 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
chan->state = CPDMA_STATE_IDLE; chan->state = CPDMA_STATE_IDLE;
chan->chan_num = chan_num; chan->chan_num = chan_num;
chan->handler = handler; chan->handler = handler;
chan->desc_num = ctlr->pool->num_desc / 2;
if (is_rx_chan(chan)) { if (is_rx_chan(chan)) {
chan->hdp = ctlr->params.rxhdp + offset; chan->hdp = ctlr->params.rxhdp + offset;
...@@ -681,7 +670,13 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data, ...@@ -681,7 +670,13 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
goto unlock_ret; goto unlock_ret;
} }
desc = cpdma_desc_alloc(ctlr->pool, 1, is_rx_chan(chan)); if (chan->count >= chan->desc_num) {
chan->stats.desc_alloc_fail++;
ret = -ENOMEM;
goto unlock_ret;
}
desc = cpdma_desc_alloc(ctlr->pool);
if (!desc) { if (!desc) {
chan->stats.desc_alloc_fail++; chan->stats.desc_alloc_fail++;
ret = -ENOMEM; ret = -ENOMEM;
...@@ -727,24 +722,16 @@ EXPORT_SYMBOL_GPL(cpdma_chan_submit); ...@@ -727,24 +722,16 @@ EXPORT_SYMBOL_GPL(cpdma_chan_submit);
bool cpdma_check_free_tx_desc(struct cpdma_chan *chan) bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
{ {
unsigned long flags;
int index;
bool ret;
struct cpdma_ctlr *ctlr = chan->ctlr; struct cpdma_ctlr *ctlr = chan->ctlr;
struct cpdma_desc_pool *pool = ctlr->pool; struct cpdma_desc_pool *pool = ctlr->pool;
bool free_tx_desc;
unsigned long flags;
spin_lock_irqsave(&pool->lock, flags); spin_lock_irqsave(&chan->lock, flags);
free_tx_desc = (chan->count < chan->desc_num) &&
index = bitmap_find_next_zero_area(pool->bitmap, gen_pool_avail(pool->gen_pool);
pool->num_desc, pool->num_desc/2, 1, 0); spin_unlock_irqrestore(&chan->lock, flags);
return free_tx_desc;
if (index < pool->num_desc)
ret = true;
else
ret = false;
spin_unlock_irqrestore(&pool->lock, flags);
return ret;
} }
EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc); EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment