Commit ed58a112 authored by Vinod Koul

Merge branch 'topic/iommu' into for-linus

parents f4929080 2e0cc304
@@ -277,14 +277,26 @@ and <size> parameters are provided to do partial page mapping, it is
 recommended that you never use these unless you really know what the
 cache width is.
+dma_addr_t
+dma_map_resource(struct device *dev, phys_addr_t phys_addr, size_t size,
+		 enum dma_data_direction dir, unsigned long attrs)
+
+void
+dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
+		   enum dma_data_direction dir, unsigned long attrs)
+
+API for mapping and unmapping for MMIO resources. All the notes and
+warnings for the other mapping APIs apply here. The API should only be
+used to map device MMIO resources; mapping of RAM is not permitted.
+
 int
 dma_mapping_error(struct device *dev, dma_addr_t dma_addr)

-In some circumstances dma_map_single() and dma_map_page() will fail to create
-a mapping. A driver can check for these errors by testing the returned
-DMA address with dma_mapping_error(). A non-zero return value means the mapping
-could not be created and the driver should take appropriate action (e.g.
-reduce current DMA mapping usage or delay and try again later).
+In some circumstances dma_map_single(), dma_map_page() and dma_map_resource()
+will fail to create a mapping. A driver can check for these errors by testing
+the returned DMA address with dma_mapping_error(). A non-zero return value
+means the mapping could not be created and the driver should take appropriate
+action (e.g. reduce current DMA mapping usage or delay and try again later).
 int
 dma_map_sg(struct device *dev, struct scatterlist *sg,
...
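To make the new documentation concrete, here is a minimal, hypothetical caller combining both hunks above: it maps a slave device's FIFO register, checks the result as the dma_mapping_error() paragraph prescribes, and unmaps when done. The FIFO address and size are invented for illustration; a real driver would take them from its resources.

	/* Hypothetical values -- MMIO only, dma_map_resource() rejects RAM. */
	#define EXAMPLE_FIFO_PHYS	((phys_addr_t)0xfe001000)
	#define EXAMPLE_FIFO_SIZE	((size_t)0x100)

	dma_addr_t handle;

	handle = dma_map_resource(dev, EXAMPLE_FIFO_PHYS, EXAMPLE_FIFO_SIZE,
				  DMA_FROM_DEVICE, 0);
	if (dma_mapping_error(dev, handle))
		return -EIO;	/* mapping could not be created */

	/* ... hand 'handle' to the DMA engine ... */

	dma_unmap_resource(dev, handle, EXAMPLE_FIFO_SIZE, DMA_FROM_DEVICE, 0);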
@@ -2014,6 +2014,63 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
 	__free_iova(mapping, iova, len);
 }
+/**
+ * arm_iommu_map_resource - map a device resource for DMA
+ * @dev: valid struct device pointer
+ * @phys_addr: physical address of resource
+ * @size: size of resource to map
+ * @dir: DMA transfer direction
+ */
+static dma_addr_t arm_iommu_map_resource(struct device *dev,
+		phys_addr_t phys_addr, size_t size,
+		enum dma_data_direction dir, unsigned long attrs)
+{
+	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
+	dma_addr_t dma_addr;
+	int ret, prot;
+	phys_addr_t addr = phys_addr & PAGE_MASK;
+	unsigned int offset = phys_addr & ~PAGE_MASK;
+	size_t len = PAGE_ALIGN(size + offset);
+
+	dma_addr = __alloc_iova(mapping, len);
+	if (dma_addr == DMA_ERROR_CODE)
+		return dma_addr;
+
+	prot = __dma_direction_to_prot(dir) | IOMMU_MMIO;
+
+	ret = iommu_map(mapping->domain, dma_addr, addr, len, prot);
+	if (ret < 0)
+		goto fail;
+
+	return dma_addr + offset;
+fail:
+	__free_iova(mapping, dma_addr, len);
+	return DMA_ERROR_CODE;
+}
+
+/**
+ * arm_iommu_unmap_resource - unmap a device DMA resource
+ * @dev: valid struct device pointer
+ * @dma_handle: DMA address to resource
+ * @size: size of resource to unmap
+ * @dir: DMA transfer direction
+ */
+static void arm_iommu_unmap_resource(struct device *dev, dma_addr_t dma_handle,
+		size_t size, enum dma_data_direction dir,
+		unsigned long attrs)
+{
+	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
+	dma_addr_t iova = dma_handle & PAGE_MASK;
+	unsigned int offset = dma_handle & ~PAGE_MASK;
+	size_t len = PAGE_ALIGN(size + offset);
+
+	if (!iova)
+		return;
+
+	iommu_unmap(mapping->domain, iova, len);
+	__free_iova(mapping, iova, len);
+}
+
 static void arm_iommu_sync_single_for_cpu(struct device *dev,
 		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
@@ -2057,6 +2114,9 @@ struct dma_map_ops iommu_ops = {
 	.unmap_sg = arm_iommu_unmap_sg,
 	.sync_sg_for_cpu = arm_iommu_sync_sg_for_cpu,
 	.sync_sg_for_device = arm_iommu_sync_sg_for_device,
+
+	.map_resource = arm_iommu_map_resource,
+	.unmap_resource = arm_iommu_unmap_resource,
 };
 struct dma_map_ops iommu_coherent_ops = {
@@ -2070,6 +2130,9 @@ struct dma_map_ops iommu_coherent_ops = {
 	.map_sg = arm_coherent_iommu_map_sg,
 	.unmap_sg = arm_coherent_iommu_unmap_sg,
+
+	.map_resource = arm_iommu_map_resource,
+	.unmap_resource = arm_iommu_unmap_resource,
 };
 /**
...
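The only subtle part of the ARM IOMMU implementation above is the page-alignment arithmetic: the IOVA allocator works in whole pages, so the physical address is split into a page-aligned base and an in-page offset, and the offset is added back onto the returned handle. A standalone sketch of the same arithmetic (assuming 4 KiB pages; the register address is invented) makes the numbers concrete:

	#include <stdint.h>
	#include <stdio.h>

	/* Assumed 4 KiB pages, as on ARM. */
	#define PAGE_SIZE	4096ULL
	#define PAGE_MASK	(~(PAGE_SIZE - 1))
	#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

	int main(void)
	{
		uint64_t phys_addr = 0xe6e60024ULL;	/* invented MMIO register */
		uint64_t size = 4;			/* one 32-bit register */

		uint64_t addr = phys_addr & PAGE_MASK;		/* 0xe6e60000 */
		uint64_t offset = phys_addr & ~PAGE_MASK;	/* 0x24 */
		uint64_t len = PAGE_ALIGN(size + offset);	/* 0x1000: one page */

		/* the IOMMU maps [addr, addr + len); the caller gets iova + offset */
		printf("base %#llx, offset %#llx, len %#llx\n",
		       (unsigned long long)addr, (unsigned long long)offset,
		       (unsigned long long)len);
		return 0;
	}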
@@ -117,15 +117,35 @@ struct rcar_dmac_desc_page {
 	((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, chunks)) /	\
 	 sizeof(struct rcar_dmac_xfer_chunk))
+/*
+ * struct rcar_dmac_chan_slave - Slave configuration
+ * @slave_addr: slave memory address
+ * @xfer_size: size (in bytes) of hardware transfers
+ */
+struct rcar_dmac_chan_slave {
+	phys_addr_t slave_addr;
+	unsigned int xfer_size;
+};
+
+/*
+ * struct rcar_dmac_chan_map - Map of slave device phys to dma address
+ * @addr: slave dma address
+ * @dir: direction of mapping
+ * @slave: slave configuration that is mapped
+ */
+struct rcar_dmac_chan_map {
+	dma_addr_t addr;
+	enum dma_data_direction dir;
+	struct rcar_dmac_chan_slave slave;
+};
+
 /*
  * struct rcar_dmac_chan - R-Car Gen2 DMA Controller Channel
  * @chan: base DMA channel object
  * @iomem: channel I/O memory base
  * @index: index of this channel in the controller
- * @src_xfer_size: size (in bytes) of hardware transfers on the source side
- * @dst_xfer_size: size (in bytes) of hardware transfers on the destination side
- * @src_slave_addr: slave source memory address
- * @dst_slave_addr: slave destination memory address
+ * @src: slave memory address and size on the source side
+ * @dst: slave memory address and size on the destination side
+ * @map: slave address and direction currently mapped for DMA on this channel
  * @mid_rid: hardware MID/RID for the DMA client using this channel
  * @lock: protects the channel CHCR register and the desc members
  * @desc.free: list of free descriptors
@@ -142,10 +162,9 @@ struct rcar_dmac_chan {
 	void __iomem *iomem;
 	unsigned int index;

-	unsigned int src_xfer_size;
-	unsigned int dst_xfer_size;
-	dma_addr_t src_slave_addr;
-	dma_addr_t dst_slave_addr;
+	struct rcar_dmac_chan_slave src;
+	struct rcar_dmac_chan_slave dst;
+	struct rcar_dmac_chan_map map;

 	int mid_rid;

 	spinlock_t lock;
@@ -793,13 +812,13 @@ static void rcar_dmac_chan_configure_desc(struct rcar_dmac_chan *chan,
 	case DMA_DEV_TO_MEM:
 		chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_FIXED
 		     | RCAR_DMACHCR_RS_DMARS;
-		xfer_size = chan->src_xfer_size;
+		xfer_size = chan->src.xfer_size;
 		break;

 	case DMA_MEM_TO_DEV:
 		chcr = RCAR_DMACHCR_DM_FIXED | RCAR_DMACHCR_SM_INC
 		     | RCAR_DMACHCR_RS_DMARS;
-		xfer_size = chan->dst_xfer_size;
+		xfer_size = chan->dst.xfer_size;
 		break;

 	case DMA_MEM_TO_MEM:
@@ -1023,13 +1042,65 @@ rcar_dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
 				       DMA_MEM_TO_MEM, flags, false);
 }
+static int rcar_dmac_map_slave_addr(struct dma_chan *chan,
+				    enum dma_transfer_direction dir)
+{
+	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
+	struct rcar_dmac_chan_map *map = &rchan->map;
+	phys_addr_t dev_addr;
+	size_t dev_size;
+	enum dma_data_direction dev_dir;
+
+	if (dir == DMA_DEV_TO_MEM) {
+		dev_addr = rchan->src.slave_addr;
+		dev_size = rchan->src.xfer_size;
+		dev_dir = DMA_TO_DEVICE;
+	} else {
+		dev_addr = rchan->dst.slave_addr;
+		dev_size = rchan->dst.xfer_size;
+		dev_dir = DMA_FROM_DEVICE;
+	}
+
+	/* Reuse current map if possible. */
+	if (dev_addr == map->slave.slave_addr &&
+	    dev_size == map->slave.xfer_size &&
+	    dev_dir == map->dir)
+		return 0;
+
+	/* Remove old mapping if present. */
+	if (map->slave.xfer_size)
+		dma_unmap_resource(chan->device->dev, map->addr,
+				   map->slave.xfer_size, map->dir, 0);
+	map->slave.xfer_size = 0;
+
+	/* Create new slave address map. */
+	map->addr = dma_map_resource(chan->device->dev, dev_addr, dev_size,
+				     dev_dir, 0);
+
+	if (dma_mapping_error(chan->device->dev, map->addr)) {
+		dev_err(chan->device->dev,
+			"chan%u: failed to map %zx@%pap\n", rchan->index,
+			dev_size, &dev_addr);
+		return -EIO;
+	}
+
+	dev_dbg(chan->device->dev, "chan%u: map %zx@%pap to %pad dir: %s\n",
+		rchan->index, dev_size, &dev_addr, &map->addr,
+		dev_dir == DMA_TO_DEVICE ? "DMA_TO_DEVICE" : "DMA_FROM_DEVICE");
+
+	map->slave.slave_addr = dev_addr;
+	map->slave.xfer_size = dev_size;
+	map->dir = dev_dir;
+
+	return 0;
+}
+
 static struct dma_async_tx_descriptor *
 rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 			unsigned int sg_len, enum dma_transfer_direction dir,
 			unsigned long flags, void *context)
 {
 	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
-	dma_addr_t dev_addr;

 	/* Someone calling slave DMA on a generic channel? */
 	if (rchan->mid_rid < 0 || !sg_len) {
@@ -1039,9 +1110,10 @@ rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		return NULL;
 	}

-	dev_addr = dir == DMA_DEV_TO_MEM
-		 ? rchan->src_slave_addr : rchan->dst_slave_addr;
-	return rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, dev_addr,
+	if (rcar_dmac_map_slave_addr(chan, dir))
+		return NULL;
+
+	return rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr,
 				      dir, flags, false);
 }
@@ -1055,7 +1127,6 @@ rcar_dmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
 	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
 	struct dma_async_tx_descriptor *desc;
 	struct scatterlist *sgl;
-	dma_addr_t dev_addr;
 	unsigned int sg_len;
 	unsigned int i;

@@ -1067,6 +1138,9 @@ rcar_dmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
 		return NULL;
 	}
+	if (rcar_dmac_map_slave_addr(chan, dir))
+		return NULL;
+
 	sg_len = buf_len / period_len;
 	if (sg_len > RCAR_DMAC_MAX_SG_LEN) {
 		dev_err(chan->device->dev,
@@ -1094,9 +1168,7 @@ rcar_dmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
 		sg_dma_len(&sgl[i]) = period_len;
 	}

-	dev_addr = dir == DMA_DEV_TO_MEM
-		 ? rchan->src_slave_addr : rchan->dst_slave_addr;
-	desc = rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, dev_addr,
+	desc = rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr,
 				      dir, flags, true);

 	kfree(sgl);
@@ -1112,10 +1184,10 @@ static int rcar_dmac_device_config(struct dma_chan *chan,
 	 * We could lock this, but you shouldn't be configuring the
 	 * channel, while using it...
 	 */
-	rchan->src_slave_addr = cfg->src_addr;
-	rchan->dst_slave_addr = cfg->dst_addr;
-	rchan->src_xfer_size = cfg->src_addr_width;
-	rchan->dst_xfer_size = cfg->dst_addr_width;
+	rchan->src.slave_addr = cfg->src_addr;
+	rchan->dst.slave_addr = cfg->dst_addr;
+	rchan->src.xfer_size = cfg->src_addr_width;
+	rchan->dst.xfer_size = cfg->dst_addr_width;

 	return 0;
 }
...
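For context, the path into rcar_dmac_map_slave_addr() starts in a dmaengine client: device_config() records the slave address and width, and the mapping is created lazily when a transfer is prepared. A hypothetical client sequence (the FIFO address is invented, and chan/sgl/sg_len are assumed to exist; the dmaengine calls themselves are the standard API):

	/* Hypothetical dmaengine client driving an rcar-dmac channel. */
	struct dma_slave_config cfg = {
		.direction = DMA_DEV_TO_MEM,
		.src_addr = 0xe6e60024,			/* invented FIFO address */
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
	};
	struct dma_async_tx_descriptor *desc;

	dmaengine_slave_config(chan, &cfg);		/* fills rchan->src */

	/* prep_slave_sg() maps the FIFO via dma_map_resource() on demand */
	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len,
				       DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (desc)
		dmaengine_submit(desc);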
@@ -56,6 +56,13 @@ extern void debug_dma_alloc_coherent(struct device *dev, size_t size,
 extern void debug_dma_free_coherent(struct device *dev, size_t size,
 				    void *virt, dma_addr_t addr);
+extern void debug_dma_map_resource(struct device *dev, phys_addr_t addr,
+				   size_t size, int direction,
+				   dma_addr_t dma_addr);
+
+extern void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
+				     size_t size, int direction);
+
 extern void debug_dma_sync_single_for_cpu(struct device *dev,
 					  dma_addr_t dma_handle, size_t size,
 					  int direction);
@@ -141,6 +148,18 @@ static inline void debug_dma_free_coherent(struct device *dev, size_t size,
 {
 }
+static inline void debug_dma_map_resource(struct device *dev, phys_addr_t addr,
+					  size_t size, int direction,
+					  dma_addr_t dma_addr)
+{
+}
+
+static inline void debug_dma_unmap_resource(struct device *dev,
+					    dma_addr_t dma_addr, size_t size,
+					    int direction)
+{
+}
+
 static inline void debug_dma_sync_single_for_cpu(struct device *dev,
 						 dma_addr_t dma_handle,
 						 size_t size, int direction)
...
@@ -95,6 +95,12 @@ struct dma_map_ops {
 			 struct scatterlist *sg, int nents,
 			 enum dma_data_direction dir,
 			 unsigned long attrs);
+	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
+			   size_t size, enum dma_data_direction dir,
+			   unsigned long attrs);
+	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
+			   size_t size, enum dma_data_direction dir,
+			   unsigned long attrs);
 	void (*sync_single_for_cpu)(struct device *dev,
 				    dma_addr_t dma_handle, size_t size,
 				    enum dma_data_direction dir);
@@ -258,6 +264,41 @@ static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
 	debug_dma_unmap_page(dev, addr, size, dir, false);
 }
+static inline dma_addr_t dma_map_resource(struct device *dev,
+					  phys_addr_t phys_addr,
+					  size_t size,
+					  enum dma_data_direction dir,
+					  unsigned long attrs)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	dma_addr_t addr;
+
+	BUG_ON(!valid_dma_direction(dir));
+
+	/* Don't allow RAM to be mapped */
+	BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));
+
+	addr = phys_addr;
+	if (ops->map_resource)
+		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);
+
+	debug_dma_map_resource(dev, phys_addr, size, dir, addr);
+
+	return addr;
+}
+
+static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
+				      size_t size, enum dma_data_direction dir,
+				      unsigned long attrs)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops->unmap_resource)
+		ops->unmap_resource(dev, addr, size, dir, attrs);
+	debug_dma_unmap_resource(dev, addr, size, dir);
+}
+
 static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
 					   size_t size,
 					   enum dma_data_direction dir)
...
@@ -43,6 +43,7 @@ enum {
 	dma_debug_page,
 	dma_debug_sg,
 	dma_debug_coherent,
+	dma_debug_resource,
 };

 enum map_err_types {
@@ -150,8 +151,9 @@ static const char *const maperr2str[] = {
 	[MAP_ERR_CHECKED] = "dma map error checked",
 };

-static const char *type2name[4] = { "single", "page",
-				    "scather-gather", "coherent" };
+static const char *type2name[5] = { "single", "page",
+				    "scather-gather", "coherent",
+				    "resource" };

 static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
 				   "DMA_FROM_DEVICE", "DMA_NONE" };
@@ -399,6 +401,9 @@ static void hash_bucket_del(struct dma_debug_entry *entry)
 static unsigned long long phys_addr(struct dma_debug_entry *entry)
 {
+	if (entry->type == dma_debug_resource)
+		return __pfn_to_phys(entry->pfn) + entry->offset;
+
 	return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;
 }
@@ -1495,6 +1500,49 @@ void debug_dma_free_coherent(struct device *dev, size_t size,
 }
 EXPORT_SYMBOL(debug_dma_free_coherent);
+void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
+			    int direction, dma_addr_t dma_addr)
+{
+	struct dma_debug_entry *entry;
+
+	if (unlikely(dma_debug_disabled()))
+		return;
+
+	entry = dma_entry_alloc();
+	if (!entry)
+		return;
+
+	entry->type = dma_debug_resource;
+	entry->dev = dev;
+	entry->pfn = PHYS_PFN(addr);
+	entry->offset = offset_in_page(addr);
+	entry->size = size;
+	entry->dev_addr = dma_addr;
+	entry->direction = direction;
+	entry->map_err_type = MAP_ERR_NOT_CHECKED;
+
+	add_dma_entry(entry);
+}
+EXPORT_SYMBOL(debug_dma_map_resource);
+
+void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
+			      size_t size, int direction)
+{
+	struct dma_debug_entry ref = {
+		.type = dma_debug_resource,
+		.dev = dev,
+		.dev_addr = dma_addr,
+		.size = size,
+		.direction = direction,
+	};
+
+	if (unlikely(dma_debug_disabled()))
+		return;
+
+	check_unmap(&ref);
+}
+EXPORT_SYMBOL(debug_dma_unmap_resource);
+
 void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
 				   size_t size, int direction)
 {
...
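These hooks tie resource mappings into the existing dma-debug bookkeeping, so the checks in check_unmap() now cover them when CONFIG_DMA_API_DEBUG is enabled. A hedged sketch of the kind of driver bug this catches (the handle name, address and sizes are invented for illustration):

	/* With CONFIG_DMA_API_DEBUG=y, check_unmap() compares each unmap
	 * against the recorded entry and warns on mismatches. */
	dma_addr_t handle = dma_map_resource(dev, res_phys, 0x100,
					     DMA_TO_DEVICE, 0);

	/* BUG (deliberate, for illustration): the size differs from the
	 * mapping above, so dma-debug flags this unmap. */
	dma_unmap_resource(dev, handle, 0x80, DMA_TO_DEVICE, 0);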