Commit 954b7207 authored by Linus Torvalds

Merge tag 'dma-mapping-5.13' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping updates from Christoph Hellwig:

 - add a new dma_alloc_noncontiguous API (me, Ricardo Ribalda)

 - fix a copyright notice (Hao Fang)

 - add an unlikely annotation to dma_mapping_error (Heiner Kallweit)

 - remove a pointless empty line (Wang Qing)

 - add support for multi-pages map/unmap benchmarking (Xiang Chen)

* tag 'dma-mapping-5.13' of git://git.infradead.org/users/hch/dma-mapping:
  dma-mapping: add unlikely hint to error path in dma_mapping_error
  dma-mapping: benchmark: Add support for multi-pages map/unmap
  dma-mapping: benchmark: use the correct HiSilicon copyright
  dma-mapping: remove a pointless empty line in dma_alloc_coherent
  media: uvcvideo: Use dma_alloc_noncontiguous API
  dma-iommu: implement ->alloc_noncontiguous
  dma-iommu: refactor iommu_dma_alloc_remap
  dma-mapping: add a dma_alloc_noncontiguous API
  dma-mapping: refactor dma_{alloc,free}_pages
  dma-mapping: add a dma_mmap_pages helper
parents 51e6f07c a7f3d3d3
@@ -563,6 +563,16 @@ Free a region of memory previously allocated using dma_alloc_pages().
dev, size, dma_handle and dir must all be the same as those passed into
dma_alloc_pages(). page must be the pointer returned by dma_alloc_pages().
::
int
dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
size_t size, struct page *page)
Map an allocation returned from dma_alloc_pages() into a user address space.
dev and size must be the same as those passed into dma_alloc_pages().
page must be the pointer returned by dma_alloc_pages().
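As a rough illustration of how these two calls pair up, a driver's mmap file
operation might hand a previously allocated buffer to user space along the
following lines (a minimal sketch; my_dev and its fields are invented for the
example and are not part of the API)::

    /* md->page was returned by dma_alloc_pages(md->dev, md->size, ...) */
    static int my_driver_mmap(struct file *file, struct vm_area_struct *vma)
    {
            struct my_dev *md = file->private_data;

            return dma_mmap_pages(md->dev, vma, md->size, md->page);
    }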
::

void *

@@ -584,6 +594,84 @@ dev, size, dma_handle and dir must all be the same as those passed into
dma_alloc_noncoherent(). cpu_addr must be the virtual address returned by
dma_alloc_noncoherent().
::
struct sg_table *
dma_alloc_noncontiguous(struct device *dev, size_t size,
enum dma_data_direction dir, gfp_t gfp,
unsigned long attrs);
This routine allocates <size> bytes of non-coherent and possibly non-contiguous
memory. It returns a pointer to struct sg_table that describes the allocated
and DMA mapped memory, or NULL if the allocation failed. The resulting memory
can be used for anything that struct pages mapped into a scatterlist are
suitable for.

The returned sg_table is guaranteed to have a single DMA mapped segment, as
indicated by sgt->nents, but it might have multiple CPU side segments, as
indicated by sgt->orig_nents.

The dir parameter specifies whether data is read and/or written by the device,
see dma_map_single() for details.
The gfp parameter allows the caller to specify the ``GFP_`` flags (see
kmalloc()) for the allocation, but rejects flags used to specify a memory
zone such as GFP_DMA or GFP_HIGHMEM.
The attrs argument must be either 0 or DMA_ATTR_ALLOC_SINGLE_PAGES.
Before giving the memory to the device, dma_sync_sgtable_for_device() needs
to be called, and before reading memory written by the device,
dma_sync_sgtable_for_cpu(), just like for streaming DMA mappings that are
reused.
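To make the calling convention concrete, a minimal sketch of one allocation and
sync cycle might look as follows (dev, size and the program_hw() helper are
placeholders for this example, not part of the documented API)::

    struct sg_table *sgt;

    sgt = dma_alloc_noncontiguous(dev, size, DMA_FROM_DEVICE, GFP_KERNEL, 0);
    if (!sgt)
            return -ENOMEM;

    /* a single DMA segment is guaranteed, hand its address to the device */
    program_hw(dev, sgt->sgl->dma_address, size);

    dma_sync_sgtable_for_device(dev, sgt, DMA_FROM_DEVICE);
    /* ... device DMA runs ... */
    dma_sync_sgtable_for_cpu(dev, sgt, DMA_FROM_DEVICE);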
::
void
dma_free_noncontiguous(struct device *dev, size_t size,
struct sg_table *sgt,
enum dma_data_direction dir)
Free memory previously allocated using dma_alloc_noncontiguous(). dev, size,
and dir must all be the same as those passed into dma_alloc_noncontiguous().
sgt must be the pointer returned by dma_alloc_noncontiguous().
::
void *
dma_vmap_noncontiguous(struct device *dev, size_t size,
struct sg_table *sgt)
Return a contiguous kernel mapping for an allocation returned from
dma_alloc_noncontiguous(). dev and size must be the same as those passed into
dma_alloc_noncontiguous(). sgt must be the pointer returned by
dma_alloc_noncontiguous().
Once a non-contiguous allocation is mapped using this function, the
flush_kernel_vmap_range() and invalidate_kernel_vmap_range() APIs must be used
to manage the coherency between the kernel mapping, the device and user space
mappings (if any).
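A sketch of how the kernel mapping is typically combined with those coherency
helpers (the buffer use here is purely illustrative)::

    void *vaddr = dma_vmap_noncontiguous(dev, size, sgt);

    if (vaddr) {
            memset(vaddr, 0, size);                 /* CPU writes ... */
            flush_kernel_vmap_range(vaddr, size);   /* ... flushed before DMA */
    }

    /* after the device has written into the buffer: */
    invalidate_kernel_vmap_range(vaddr, size);
    dma_vunmap_noncontiguous(dev, vaddr);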
::
void
dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
Unmap a kernel mapping returned by dma_vmap_noncontiguous(). dev must be the
same as the one passed into dma_alloc_noncontiguous(). vaddr must be the
pointer returned by dma_vmap_noncontiguous().
::
int
dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
size_t size, struct sg_table *sgt)
Map an allocation returned from dma_alloc_noncontiguous() into a user address
space. dev and size must be the same as those passed into
dma_alloc_noncontiguous(). sgt must be the pointer returned by
dma_alloc_noncontiguous().
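For completeness, exposing such an allocation through a driver's mmap file
operation could look roughly like this (my_ctx and its members are invented
for illustration)::

    static int my_mmap(struct file *file, struct vm_area_struct *vma)
    {
            struct my_ctx *ctx = file->private_data;

            /* ctx->sgt was returned by dma_alloc_noncontiguous() */
            return dma_mmap_noncontiguous(ctx->dev, vma, ctx->size, ctx->sgt);
    }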
::

int

...
@@ -647,23 +647,12 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
return pages;
}

-/**
-* iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space
-* @dev: Device to allocate memory for. Must be a real device
-* attached to an iommu_dma_domain
-* @size: Size of buffer in bytes
-* @dma_handle: Out argument for allocated DMA handle
-* @gfp: Allocation flags
-* @prot: pgprot_t to use for the remapped mapping
-* @attrs: DMA attributes for this allocation
-*
-* If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
+/*
+* If size is less than PAGE_SIZE, then a full CPU page will be allocated,
* but an IOMMU which supports smaller pages might not map the whole thing.
-*
-* Return: Mapped virtual address, or NULL on failure.
*/
-static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
-dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot,
+static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
+size_t size, struct sg_table *sgt, gfp_t gfp, pgprot_t prot,
unsigned long attrs)
{
struct iommu_domain *domain = iommu_get_dma_domain(dev);
@@ -673,11 +662,7 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
struct page **pages;
-struct sg_table sgt;
dma_addr_t iova;
-void *vaddr;
-*dma_handle = DMA_MAPPING_ERROR;

if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
iommu_deferred_attach(dev, domain))
@@ -704,41 +689,91 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
if (!iova)
goto out_free_pages;

-if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
+if (sg_alloc_table_from_pages(sgt, pages, count, 0, size, GFP_KERNEL))
goto out_free_iova;

if (!(ioprot & IOMMU_CACHE)) {
struct scatterlist *sg;
int i;

-for_each_sg(sgt.sgl, sg, sgt.orig_nents, i)
+for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
arch_dma_prep_coherent(sg_page(sg), sg->length);
}

-if (iommu_map_sg_atomic(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
+if (iommu_map_sg_atomic(domain, iova, sgt->sgl, sgt->orig_nents, ioprot)
< size)
goto out_free_sg;
sgt->sgl->dma_address = iova;
sgt->sgl->dma_length = size;
return pages;
out_free_sg:
sg_free_table(sgt);
out_free_iova:
iommu_dma_free_iova(cookie, iova, size, NULL);
out_free_pages:
__iommu_dma_free_pages(pages, count);
return NULL;
}
static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot,
unsigned long attrs)
{
struct page **pages;
struct sg_table sgt;
void *vaddr;
pages = __iommu_dma_alloc_noncontiguous(dev, size, &sgt, gfp, prot,
attrs);
if (!pages)
return NULL;
*dma_handle = sgt.sgl->dma_address;
sg_free_table(&sgt);
vaddr = dma_common_pages_remap(pages, size, prot,
__builtin_return_address(0));
if (!vaddr)
goto out_unmap;
-*dma_handle = iova;
-sg_free_table(&sgt);
return vaddr;

out_unmap:
-__iommu_dma_unmap(dev, iova, size);
-out_free_sg:
-sg_free_table(&sgt);
-out_free_iova:
-iommu_dma_free_iova(cookie, iova, size, NULL);
-out_free_pages:
-__iommu_dma_free_pages(pages, count);
+__iommu_dma_unmap(dev, *dma_handle, size);
+__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
return NULL;
}
#ifdef CONFIG_DMA_REMAP
static struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev,
size_t size, enum dma_data_direction dir, gfp_t gfp,
unsigned long attrs)
{
struct dma_sgt_handle *sh;
sh = kmalloc(sizeof(*sh), gfp);
if (!sh)
return NULL;
sh->pages = __iommu_dma_alloc_noncontiguous(dev, size, &sh->sgt, gfp,
PAGE_KERNEL, attrs);
if (!sh->pages) {
kfree(sh);
return NULL;
}
return &sh->sgt;
}
static void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
struct sg_table *sgt, enum dma_data_direction dir)
{
struct dma_sgt_handle *sh = sgt_handle(sgt);
__iommu_dma_unmap(dev, sgt->sgl->dma_address, size);
__iommu_dma_free_pages(sh->pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
sg_free_table(&sh->sgt);
}
#endif /* CONFIG_DMA_REMAP */
static void iommu_dma_sync_single_for_cpu(struct device *dev,
dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{

@@ -1255,6 +1290,10 @@ static const struct dma_map_ops iommu_dma_ops = {
.free = iommu_dma_free,
.alloc_pages = dma_common_alloc_pages,
.free_pages = dma_common_free_pages,
+#ifdef CONFIG_DMA_REMAP
+.alloc_noncontiguous = iommu_dma_alloc_noncontiguous,
+.free_noncontiguous = iommu_dma_free_noncontiguous,
+#endif
.mmap = iommu_dma_mmap,
.get_sgtable = iommu_dma_get_sgtable,
.map_page = iommu_dma_map_page,

...
@@ -6,11 +6,14 @@
* Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*/

+#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>
+#include <linux/usb/hcd.h>
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>

@@ -1096,6 +1099,29 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
return data[0];
}
static inline enum dma_data_direction uvc_stream_dir(
struct uvc_streaming *stream)
{
if (stream->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
return DMA_FROM_DEVICE;
else
return DMA_TO_DEVICE;
}
static inline struct device *uvc_stream_to_dmadev(struct uvc_streaming *stream)
{
return bus_to_hcd(stream->dev->udev->bus)->self.sysdev;
}
static int uvc_submit_urb(struct uvc_urb *uvc_urb, gfp_t mem_flags)
{
/* Sync DMA. */
dma_sync_sgtable_for_device(uvc_stream_to_dmadev(uvc_urb->stream),
uvc_urb->sgt,
uvc_stream_dir(uvc_urb->stream));
return usb_submit_urb(uvc_urb->urb, mem_flags);
}
/*
* uvc_video_decode_data_work: Asynchronous memcpy processing
*

@@ -1117,7 +1143,7 @@ static void uvc_video_copy_data_work(struct work_struct *work)
uvc_queue_buffer_release(op->buf);
}

-ret = usb_submit_urb(uvc_urb->urb, GFP_KERNEL);
+ret = uvc_submit_urb(uvc_urb, GFP_KERNEL);
if (ret < 0)
dev_err(&uvc_urb->stream->intf->dev,
"Failed to resubmit video URB (%d).\n", ret);

@@ -1537,6 +1563,12 @@ static void uvc_video_complete(struct urb *urb)
/* Re-initialise the URB async work. */
uvc_urb->async_operations = 0;
/* Sync DMA and invalidate vmap range. */
dma_sync_sgtable_for_cpu(uvc_stream_to_dmadev(uvc_urb->stream),
uvc_urb->sgt, uvc_stream_dir(stream));
invalidate_kernel_vmap_range(uvc_urb->buffer,
uvc_urb->stream->urb_size);
/*
* Process the URB headers, and optionally queue expensive memcpy tasks
* to be deferred to a work queue.

@@ -1545,7 +1577,7 @@
/* If no async work is needed, resubmit the URB immediately. */
if (!uvc_urb->async_operations) {
-ret = usb_submit_urb(uvc_urb->urb, GFP_ATOMIC);
+ret = uvc_submit_urb(uvc_urb, GFP_ATOMIC);
if (ret < 0)
dev_err(&stream->intf->dev,
"Failed to resubmit video URB (%d).\n", ret);

@@ -1560,24 +1592,49 @@
*/
static void uvc_free_urb_buffers(struct uvc_streaming *stream)
{
+struct device *dma_dev = uvc_stream_to_dmadev(stream);
struct uvc_urb *uvc_urb;

for_each_uvc_urb(uvc_urb, stream) {
if (!uvc_urb->buffer)
continue;

-#ifndef CONFIG_DMA_NONCOHERENT
-usb_free_coherent(stream->dev->udev, stream->urb_size,
-uvc_urb->buffer, uvc_urb->dma);
-#else
-kfree(uvc_urb->buffer);
-#endif
+dma_vunmap_noncontiguous(dma_dev, uvc_urb->buffer);
+dma_free_noncontiguous(dma_dev, stream->urb_size, uvc_urb->sgt,
+uvc_stream_dir(stream));
uvc_urb->buffer = NULL;
+uvc_urb->sgt = NULL;
}

stream->urb_size = 0;
}
static bool uvc_alloc_urb_buffer(struct uvc_streaming *stream,
struct uvc_urb *uvc_urb, gfp_t gfp_flags)
{
struct device *dma_dev = uvc_stream_to_dmadev(stream);
uvc_urb->sgt = dma_alloc_noncontiguous(dma_dev, stream->urb_size,
uvc_stream_dir(stream),
gfp_flags, 0);
if (!uvc_urb->sgt)
return false;
uvc_urb->dma = uvc_urb->sgt->sgl->dma_address;
uvc_urb->buffer = dma_vmap_noncontiguous(dma_dev, stream->urb_size,
uvc_urb->sgt);
if (!uvc_urb->buffer) {
dma_free_noncontiguous(dma_dev, stream->urb_size,
uvc_urb->sgt,
uvc_stream_dir(stream));
uvc_urb->sgt = NULL;
return false;
}
return true;
}
/*
* Allocate transfer buffers. This function can be called with buffers
* already allocated when resuming from suspend, in which case it will

@@ -1608,19 +1665,12 @@ static int uvc_alloc_urb_buffers(struct uvc_streaming *stream,
/* Retry allocations until one succeed. */
for (; npackets > 1; npackets /= 2) {
+stream->urb_size = psize * npackets;
+
for (i = 0; i < UVC_URBS; ++i) {
struct uvc_urb *uvc_urb = &stream->uvc_urb[i];

-stream->urb_size = psize * npackets;
-#ifndef CONFIG_DMA_NONCOHERENT
-uvc_urb->buffer = usb_alloc_coherent(
-stream->dev->udev, stream->urb_size,
-gfp_flags | __GFP_NOWARN, &uvc_urb->dma);
-#else
-uvc_urb->buffer =
-kmalloc(stream->urb_size, gfp_flags | __GFP_NOWARN);
-#endif
-if (!uvc_urb->buffer) {
+if (!uvc_alloc_urb_buffer(stream, uvc_urb, gfp_flags)) {
uvc_free_urb_buffers(stream);
break;
}
@@ -1730,12 +1780,8 @@ static int uvc_init_video_isoc(struct uvc_streaming *stream,
urb->context = uvc_urb;
urb->pipe = usb_rcvisocpipe(stream->dev->udev,
ep->desc.bEndpointAddress);
-#ifndef CONFIG_DMA_NONCOHERENT
urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
urb->transfer_dma = uvc_urb->dma;
-#else
-urb->transfer_flags = URB_ISO_ASAP;
-#endif
urb->interval = ep->desc.bInterval;
urb->transfer_buffer = uvc_urb->buffer;
urb->complete = uvc_video_complete;

@@ -1795,10 +1841,8 @@ static int uvc_init_video_bulk(struct uvc_streaming *stream,
usb_fill_bulk_urb(urb, stream->dev->udev, pipe, uvc_urb->buffer,
size, uvc_video_complete, uvc_urb);
-#ifndef CONFIG_DMA_NONCOHERENT
urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
urb->transfer_dma = uvc_urb->dma;
-#endif
uvc_urb->urb = urb;
}

@@ -1895,7 +1939,7 @@ static int uvc_video_start_transfer(struct uvc_streaming *stream,
/* Submit the URBs. */
for_each_uvc_urb(uvc_urb, stream) {
-ret = usb_submit_urb(uvc_urb->urb, gfp_flags);
+ret = uvc_submit_urb(uvc_urb, gfp_flags);
if (ret < 0) {
dev_err(&stream->intf->dev,
"Failed to submit URB %u (%d).\n",

...
@@ -219,6 +219,7 @@
*/

struct gpio_desc;
+struct sg_table;
struct uvc_device;

/* TODO: Put the most frequently accessed fields at the beginning of

@@ -545,7 +546,8 @@ struct uvc_copy_op {
* @urb: the URB described by this context structure
* @stream: UVC streaming context
* @buffer: memory storage for the URB
-* @dma: DMA coherent addressing for the urb_buffer
+* @dma: Allocated DMA handle
+* @sgt: sgt_table with the urb locations in memory
* @async_operations: counter to indicate the number of copy operations
* @copy_operations: work descriptors for asynchronous copy operations
* @work: work queue entry for asynchronous decode

@@ -556,6 +558,7 @@ struct uvc_urb {
char *buffer;
dma_addr_t dma;
+struct sg_table *sgt;

unsigned int async_operations;
struct uvc_copy_op copy_operations[UVC_MAX_PACKETS];

...
@@ -22,6 +22,11 @@ struct dma_map_ops {
gfp_t gfp);
void (*free_pages)(struct device *dev, size_t size, struct page *vaddr,
dma_addr_t dma_handle, enum dma_data_direction dir);
+struct sg_table *(*alloc_noncontiguous)(struct device *dev, size_t size,
+enum dma_data_direction dir, gfp_t gfp,
+unsigned long attrs);
+void (*free_noncontiguous)(struct device *dev, size_t size,
+struct sg_table *sgt, enum dma_data_direction dir);
int (*mmap)(struct device *, struct vm_area_struct *,
void *, dma_addr_t, size_t, unsigned long attrs);

@@ -198,6 +203,20 @@ static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
}
#endif /* CONFIG_DMA_DECLARE_COHERENT */
/*
* This is the actual return value from the ->alloc_noncontiguous method.
* The users of the DMA API should only care about the sg_table, but to make
* the DMA-API internal vmaping and freeing easier we stash away the page
* array as well (except for the fallback case). This can go away any time,
* e.g. when a vmap-variant that takes a scatterlist comes along.
*/
struct dma_sgt_handle {
struct sg_table sgt;
struct page **pages;
};
#define sgt_handle(sgt) \
container_of((sgt), struct dma_sgt_handle, sgt)
int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs);

...
@@ -95,7 +95,7 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
debug_dma_mapping_error(dev, dma_addr);

-if (dma_addr == DMA_MAPPING_ERROR)
+if (unlikely(dma_addr == DMA_MAPPING_ERROR))
return -ENOMEM;
return 0;
}

@@ -144,6 +144,15 @@ u64 dma_get_required_mask(struct device *dev);
size_t dma_max_mapping_size(struct device *dev);
bool dma_need_sync(struct device *dev, dma_addr_t dma_addr);
unsigned long dma_get_merge_boundary(struct device *dev);
struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
enum dma_data_direction dir, gfp_t gfp, unsigned long attrs);
void dma_free_noncontiguous(struct device *dev, size_t size,
struct sg_table *sgt, enum dma_data_direction dir);
void *dma_vmap_noncontiguous(struct device *dev, size_t size,
struct sg_table *sgt);
void dma_vunmap_noncontiguous(struct device *dev, void *vaddr);
int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
size_t size, struct sg_table *sgt);
#else /* CONFIG_HAS_DMA */
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
struct page *page, size_t offset, size_t size,

@@ -257,12 +266,37 @@ static inline unsigned long dma_get_merge_boundary(struct device *dev)
{
return 0;
}
static inline struct sg_table *dma_alloc_noncontiguous(struct device *dev,
size_t size, enum dma_data_direction dir, gfp_t gfp,
unsigned long attrs)
{
return NULL;
}
static inline void dma_free_noncontiguous(struct device *dev, size_t size,
struct sg_table *sgt, enum dma_data_direction dir)
{
}
static inline void *dma_vmap_noncontiguous(struct device *dev, size_t size,
struct sg_table *sgt)
{
return NULL;
}
static inline void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
{
}
static inline int dma_mmap_noncontiguous(struct device *dev,
struct vm_area_struct *vma, size_t size, struct sg_table *sgt)
{
return -EINVAL;
}
#endif /* CONFIG_HAS_DMA */

struct page *dma_alloc_pages(struct device *dev, size_t size,
dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_free_pages(struct device *dev, size_t size, struct page *page,
dma_addr_t dma_handle, enum dma_data_direction dir);
+int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
+size_t size, struct page *page);

static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)

@@ -401,7 +435,6 @@ static inline void dma_sync_sgtable_for_device(struct device *dev,
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp)
{
-
return dma_alloc_attrs(dev, size, dma_handle, gfp,
(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

...
// SPDX-License-Identifier: GPL-2.0-only
/*
-* Copyright (C) 2020 Hisilicon Limited.
+* Copyright (C) 2020 HiSilicon Limited.
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

@@ -38,7 +38,8 @@ struct map_benchmark {
__u32 dma_bits; /* DMA addressing capability */
__u32 dma_dir; /* DMA data direction */
__u32 dma_trans_ns; /* time for DMA transmission in ns */
-__u8 expansion[80]; /* For future use */
+__u32 granule; /* how many PAGE_SIZE will do map/unmap once a time */
+__u8 expansion[76]; /* For future use */
};

struct map_benchmark_data {

@@ -58,9 +59,11 @@ static int map_benchmark_thread(void *data)
void *buf;
dma_addr_t dma_addr;
struct map_benchmark_data *map = data;
+int npages = map->bparam.granule;
+u64 size = npages * PAGE_SIZE;
int ret = 0;

-buf = (void *)__get_free_page(GFP_KERNEL);
+buf = alloc_pages_exact(size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -76,10 +79,10 @@ static int map_benchmark_thread(void *data)
* 66 means evertything goes well! 66 is lucky.
*/
if (map->dir != DMA_FROM_DEVICE)
-memset(buf, 0x66, PAGE_SIZE);
+memset(buf, 0x66, size);

map_stime = ktime_get();
-dma_addr = dma_map_single(map->dev, buf, PAGE_SIZE, map->dir);
+dma_addr = dma_map_single(map->dev, buf, size, map->dir);
if (unlikely(dma_mapping_error(map->dev, dma_addr))) {
pr_err("dma_map_single failed on %s\n",
dev_name(map->dev));

@@ -93,7 +96,7 @@
ndelay(map->bparam.dma_trans_ns);

unmap_stime = ktime_get();
-dma_unmap_single(map->dev, dma_addr, PAGE_SIZE, map->dir);
+dma_unmap_single(map->dev, dma_addr, size, map->dir);
unmap_etime = ktime_get();
unmap_delta = ktime_sub(unmap_etime, unmap_stime);

@@ -112,7 +115,7 @@
}
out:
-free_page((unsigned long)buf);
+free_pages_exact(buf, size);
return ret;
}

@@ -203,7 +206,6 @@ static long map_benchmark_ioctl(struct file *file, unsigned int cmd,
struct map_benchmark_data *map = file->private_data;
void __user *argp = (void __user *)arg;
u64 old_dma_mask;
-
int ret;

if (copy_from_user(&map->bparam, argp, sizeof(map->bparam)))
@@ -234,6 +236,11 @@ static long map_benchmark_ioctl(struct file *file, unsigned int cmd,
return -EINVAL;
}

+if (map->bparam.granule < 1 || map->bparam.granule > 1024) {
+pr_err("invalid granule size\n");
+return -EINVAL;
+}
+
switch (map->bparam.dma_dir) {
case DMA_MAP_BIDIRECTIONAL:
map->dir = DMA_BIDIRECTIONAL;

...
@@ -477,11 +477,10 @@ void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
}
EXPORT_SYMBOL(dma_free_attrs);

-struct page *dma_alloc_pages(struct device *dev, size_t size,
+static struct page *__dma_alloc_pages(struct device *dev, size_t size,
dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
const struct dma_map_ops *ops = get_dma_ops(dev);
-struct page *page;

if (WARN_ON_ONCE(!dev->coherent_dma_mask))
return NULL;

@@ -490,33 +489,162 @@ struct page *dma_alloc_pages(struct device *dev, size_t size,
size = PAGE_ALIGN(size);
if (dma_alloc_direct(dev, ops))
-page = dma_direct_alloc_pages(dev, size, dma_handle, dir, gfp);
-else if (ops->alloc_pages)
-page = ops->alloc_pages(dev, size, dma_handle, dir, gfp);
-else
+return dma_direct_alloc_pages(dev, size, dma_handle, dir, gfp);
+if (!ops->alloc_pages)
return NULL;
+return ops->alloc_pages(dev, size, dma_handle, dir, gfp);
+}

-debug_dma_map_page(dev, page, 0, size, dir, *dma_handle);
+struct page *dma_alloc_pages(struct device *dev, size_t size,
+dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
+{
+struct page *page = __dma_alloc_pages(dev, size, dma_handle, dir, gfp);
+
+if (page)
+debug_dma_map_page(dev, page, 0, size, dir, *dma_handle);
return page;
}
EXPORT_SYMBOL_GPL(dma_alloc_pages);

-void dma_free_pages(struct device *dev, size_t size, struct page *page,
+static void __dma_free_pages(struct device *dev, size_t size, struct page *page,
dma_addr_t dma_handle, enum dma_data_direction dir)
{
const struct dma_map_ops *ops = get_dma_ops(dev);

size = PAGE_ALIGN(size);
-debug_dma_unmap_page(dev, dma_handle, size, dir);
if (dma_alloc_direct(dev, ops))
dma_direct_free_pages(dev, size, page, dma_handle, dir);
else if (ops->free_pages)
ops->free_pages(dev, size, page, dma_handle, dir);
}
+
+void dma_free_pages(struct device *dev, size_t size, struct page *page,
+dma_addr_t dma_handle, enum dma_data_direction dir)
+{
+debug_dma_unmap_page(dev, dma_handle, size, dir);
+__dma_free_pages(dev, size, page, dma_handle, dir);
+}
EXPORT_SYMBOL_GPL(dma_free_pages);
int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
size_t size, struct page *page)
{
unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
if (vma->vm_pgoff >= count || vma_pages(vma) > count - vma->vm_pgoff)
return -ENXIO;
return remap_pfn_range(vma, vma->vm_start,
page_to_pfn(page) + vma->vm_pgoff,
vma_pages(vma) << PAGE_SHIFT, vma->vm_page_prot);
}
EXPORT_SYMBOL_GPL(dma_mmap_pages);
static struct sg_table *alloc_single_sgt(struct device *dev, size_t size,
enum dma_data_direction dir, gfp_t gfp)
{
struct sg_table *sgt;
struct page *page;
sgt = kmalloc(sizeof(*sgt), gfp);
if (!sgt)
return NULL;
if (sg_alloc_table(sgt, 1, gfp))
goto out_free_sgt;
page = __dma_alloc_pages(dev, size, &sgt->sgl->dma_address, dir, gfp);
if (!page)
goto out_free_table;
sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
sg_dma_len(sgt->sgl) = sgt->sgl->length;
return sgt;
out_free_table:
sg_free_table(sgt);
out_free_sgt:
kfree(sgt);
return NULL;
}
struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
enum dma_data_direction dir, gfp_t gfp, unsigned long attrs)
{
const struct dma_map_ops *ops = get_dma_ops(dev);
struct sg_table *sgt;
if (WARN_ON_ONCE(attrs & ~DMA_ATTR_ALLOC_SINGLE_PAGES))
return NULL;
if (ops && ops->alloc_noncontiguous)
sgt = ops->alloc_noncontiguous(dev, size, dir, gfp, attrs);
else
sgt = alloc_single_sgt(dev, size, dir, gfp);
if (sgt) {
sgt->nents = 1;
debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir);
}
return sgt;
}
EXPORT_SYMBOL_GPL(dma_alloc_noncontiguous);
static void free_single_sgt(struct device *dev, size_t size,
struct sg_table *sgt, enum dma_data_direction dir)
{
__dma_free_pages(dev, size, sg_page(sgt->sgl), sgt->sgl->dma_address,
dir);
sg_free_table(sgt);
kfree(sgt);
}
void dma_free_noncontiguous(struct device *dev, size_t size,
struct sg_table *sgt, enum dma_data_direction dir)
{
const struct dma_map_ops *ops = get_dma_ops(dev);
debug_dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
if (ops && ops->free_noncontiguous)
ops->free_noncontiguous(dev, size, sgt, dir);
else
free_single_sgt(dev, size, sgt, dir);
}
EXPORT_SYMBOL_GPL(dma_free_noncontiguous);
void *dma_vmap_noncontiguous(struct device *dev, size_t size,
struct sg_table *sgt)
{
const struct dma_map_ops *ops = get_dma_ops(dev);
unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
if (ops && ops->alloc_noncontiguous)
return vmap(sgt_handle(sgt)->pages, count, VM_MAP, PAGE_KERNEL);
return page_address(sg_page(sgt->sgl));
}
EXPORT_SYMBOL_GPL(dma_vmap_noncontiguous);
void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
{
const struct dma_map_ops *ops = get_dma_ops(dev);
if (ops && ops->alloc_noncontiguous)
vunmap(vaddr);
}
EXPORT_SYMBOL_GPL(dma_vunmap_noncontiguous);
int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
size_t size, struct sg_table *sgt)
{
const struct dma_map_ops *ops = get_dma_ops(dev);
if (ops && ops->alloc_noncontiguous) {
unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
if (vma->vm_pgoff >= count ||
vma_pages(vma) > count - vma->vm_pgoff)
return -ENXIO;
return vm_map_pages(vma, sgt_handle(sgt)->pages, count);
}
return dma_mmap_pages(dev, vma, size, sg_page(sgt->sgl));
}
EXPORT_SYMBOL_GPL(dma_mmap_noncontiguous);
int dma_supported(struct device *dev, u64 mask)
{
const struct dma_map_ops *ops = get_dma_ops(dev);

...
// SPDX-License-Identifier: GPL-2.0-only
/*
-* Copyright (C) 2020 Hisilicon Limited.
+* Copyright (C) 2020 HiSilicon Limited.
*/

#include <fcntl.h>

@@ -40,7 +40,8 @@ struct map_benchmark {
__u32 dma_bits; /* DMA addressing capability */
__u32 dma_dir; /* DMA data direction */
__u32 dma_trans_ns; /* time for DMA transmission in ns */
-__u8 expansion[80]; /* For future use */
+__u32 granule; /* how many PAGE_SIZE will do map/unmap once a time */
+__u8 expansion[76]; /* For future use */
};

int main(int argc, char **argv)

@@ -51,11 +52,13 @@ int main(int argc, char **argv)
int threads = 1, seconds = 20, node = -1;
/* default dma mask 32bit, bidirectional DMA */
int bits = 32, xdelay = 0, dir = DMA_MAP_BIDIRECTIONAL;
+/* default granule 1 PAGESIZE */
+int granule = 1;
int cmd = DMA_MAP_BENCHMARK;
char *p;

-while ((opt = getopt(argc, argv, "t:s:n:b:d:x:")) != -1) {
+while ((opt = getopt(argc, argv, "t:s:n:b:d:x:g:")) != -1) {
switch (opt) {
case 't':
threads = atoi(optarg);
@@ -75,6 +78,9 @@
case 'x':
xdelay = atoi(optarg);
break;
+case 'g':
+granule = atoi(optarg);
+break;
default:
return -1;
}

@@ -110,6 +116,11 @@
exit(1);
}

+if (granule < 1 || granule > 1024) {
+fprintf(stderr, "invalid granule size\n");
+exit(1);
+}
+
fd = open("/sys/kernel/debug/dma_map_benchmark", O_RDWR);
if (fd == -1) {
perror("open");
@@ -123,14 +134,15 @@
map.dma_bits = bits;
map.dma_dir = dir;
map.dma_trans_ns = xdelay;
+map.granule = granule;

if (ioctl(fd, cmd, &map)) {
perror("ioctl");
exit(1);
}

-printf("dma mapping benchmark: threads:%d seconds:%d node:%d dir:%s\n",
-threads, seconds, node, dir[directions]);
+printf("dma mapping benchmark: threads:%d seconds:%d node:%d dir:%s granule: %d\n",
+threads, seconds, node, dir[directions], granule);
printf("average map latency(us):%.1f standard deviation:%.1f\n",
map.avg_map_100ns/10.0, map.map_stddev/10.0);
printf("average unmap latency(us):%.1f standard deviation:%.1f\n",