Commit 338c09a9 authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'topic/omap3isp' of git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media

Pull OMAP3 updates from Mauro Carvalho Chehab:
 "Some driver improvements on OMAP3.  This series depends on some iommu
  patches already merged"

* 'topic/omap3isp' of git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media: (26 commits)
  [media] omap3isp: Rename isp_buffer isp_addr field to dma
  [media] omap3isp: Move to videobuf2
  [media] v4l: vb2: Add a function to discard all DONE buffers
  [media] omap3isp: Cancel all queued buffers when stopping the video stream
  [media] omap3isp: Move buffer irqlist to isp_buffer structure
  [media] omap3isp: Move queue irqlock to isp_video structure
  [media] omap3isp: Move queue mutex to isp_video structure
  [media] omap3isp: queue: Don't build scatterlist for kernel buffer
  [media] omap3isp: Use the ARM DMA IOMMU-aware operations
  [media] omap3isp: queue: Use sg_alloc_table_from_pages()
  [media] omap3isp: queue: Map PFNMAP buffers to device
  [media] omap3isp: queue: Fix the dma_map_sg() return value check
  [media] omap3isp: queue: Allocate kernel buffers with dma_alloc_coherent
  [media] omap3isp: queue: Inline the ispmmu_v(un)map functions
  [media] omap3isp: queue: Merge the prepare and sglist functions
  [media] omap3isp: queue: Use sg_table structure
  [media] omap3isp: queue: Move IOMMU handling code to the queue
  [media] omap3isp: video: Set the buffer bytesused field at completion time
  [media] omap3isp: ccdc: Use the DMA API for FPC
  [media] omap3isp: ccdc: Use the DMA API for LSC
  ...
parents ed9ea4ed 21d8582d
...@@ -93,7 +93,9 @@ config VIDEO_M32R_AR_M64278 ...@@ -93,7 +93,9 @@ config VIDEO_M32R_AR_M64278
config VIDEO_OMAP3 config VIDEO_OMAP3
tristate "OMAP 3 Camera support" tristate "OMAP 3 Camera support"
depends on OMAP_IOVMM && VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API && ARCH_OMAP3 depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API && ARCH_OMAP3
select ARM_DMA_USE_IOMMU
select OMAP_IOMMU
---help--- ---help---
Driver for an OMAP 3 camera controller. Driver for an OMAP 3 camera controller.
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
ccflags-$(CONFIG_VIDEO_OMAP3_DEBUG) += -DDEBUG ccflags-$(CONFIG_VIDEO_OMAP3_DEBUG) += -DDEBUG
omap3-isp-objs += \ omap3-isp-objs += \
isp.o ispqueue.o ispvideo.o \ isp.o ispvideo.o \
ispcsiphy.o ispccp2.o ispcsi2.o \ ispcsiphy.o ispccp2.o ispcsi2.o \
ispccdc.o isppreview.o ispresizer.o \ ispccdc.o isppreview.o ispresizer.o \
ispstat.o isph3a_aewb.o isph3a_af.o isphist.o ispstat.o isph3a_aewb.o isph3a_af.o isphist.o
......
...@@ -69,6 +69,8 @@ ...@@ -69,6 +69,8 @@
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/vmalloc.h> #include <linux/vmalloc.h>
#include <asm/dma-iommu.h>
#include <media/v4l2-common.h> #include <media/v4l2-common.h>
#include <media/v4l2-device.h> #include <media/v4l2-device.h>
...@@ -1397,14 +1399,14 @@ int omap3isp_module_sync_idle(struct media_entity *me, wait_queue_head_t *wait, ...@@ -1397,14 +1399,14 @@ int omap3isp_module_sync_idle(struct media_entity *me, wait_queue_head_t *wait,
if (isp_pipeline_is_last(me)) { if (isp_pipeline_is_last(me)) {
struct isp_video *video = pipe->output; struct isp_video *video = pipe->output;
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&video->queue->irqlock, flags); spin_lock_irqsave(&video->irqlock, flags);
if (video->dmaqueue_flags & ISP_VIDEO_DMAQUEUE_UNDERRUN) { if (video->dmaqueue_flags & ISP_VIDEO_DMAQUEUE_UNDERRUN) {
spin_unlock_irqrestore(&video->queue->irqlock, flags); spin_unlock_irqrestore(&video->irqlock, flags);
atomic_set(stopping, 0); atomic_set(stopping, 0);
smp_mb(); smp_mb();
return 0; return 0;
} }
spin_unlock_irqrestore(&video->queue->irqlock, flags); spin_unlock_irqrestore(&video->irqlock, flags);
if (!wait_event_timeout(*wait, !atomic_read(stopping), if (!wait_event_timeout(*wait, !atomic_read(stopping),
msecs_to_jiffies(1000))) { msecs_to_jiffies(1000))) {
atomic_set(stopping, 0); atomic_set(stopping, 0);
...@@ -1625,7 +1627,7 @@ struct isp_device *omap3isp_get(struct isp_device *isp) ...@@ -1625,7 +1627,7 @@ struct isp_device *omap3isp_get(struct isp_device *isp)
* Decrement the reference count on the ISP. If the last reference is released, * Decrement the reference count on the ISP. If the last reference is released,
* power-down all submodules, disable clocks and free temporary buffers. * power-down all submodules, disable clocks and free temporary buffers.
*/ */
void omap3isp_put(struct isp_device *isp) static void __omap3isp_put(struct isp_device *isp, bool save_ctx)
{ {
if (isp == NULL) if (isp == NULL)
return; return;
...@@ -1634,7 +1636,7 @@ void omap3isp_put(struct isp_device *isp) ...@@ -1634,7 +1636,7 @@ void omap3isp_put(struct isp_device *isp)
BUG_ON(isp->ref_count == 0); BUG_ON(isp->ref_count == 0);
if (--isp->ref_count == 0) { if (--isp->ref_count == 0) {
isp_disable_interrupts(isp); isp_disable_interrupts(isp);
if (isp->domain) { if (save_ctx) {
isp_save_ctx(isp); isp_save_ctx(isp);
isp->has_context = 1; isp->has_context = 1;
} }
...@@ -1648,6 +1650,11 @@ void omap3isp_put(struct isp_device *isp) ...@@ -1648,6 +1650,11 @@ void omap3isp_put(struct isp_device *isp)
mutex_unlock(&isp->isp_mutex); mutex_unlock(&isp->isp_mutex);
} }
/*
 * omap3isp_put - Release a reference to the ISP device
 * @isp: OMAP3 ISP device
 *
 * Public wrapper around __omap3isp_put() that always requests the hardware
 * context to be saved (save_ctx = true) before the last reference powers the
 * ISP down. Internal callers that tear the device down (probe error path,
 * remove) call __omap3isp_put(isp, false) directly instead.
 */
void omap3isp_put(struct isp_device *isp)
{
	__omap3isp_put(isp, true);
}
/* -------------------------------------------------------------------------- /* --------------------------------------------------------------------------
* Platform device driver * Platform device driver
*/ */
...@@ -2120,6 +2127,61 @@ static int isp_initialize_modules(struct isp_device *isp) ...@@ -2120,6 +2127,61 @@ static int isp_initialize_modules(struct isp_device *isp)
return ret; return ret;
} }
/*
 * isp_detach_iommu - Detach the ISP device from the IOMMU
 * @isp: OMAP3 ISP device
 *
 * Release the ARM DMA IOMMU mapping created by isp_attach_iommu() and remove
 * the device from its IOMMU group. Also called from the isp_attach_iommu()
 * error path before the mapping has been created, in which case isp->mapping
 * is still NULL.
 */
static void isp_detach_iommu(struct isp_device *isp)
{
	arm_iommu_release_mapping(isp->mapping);
	isp->mapping = NULL;
	iommu_group_remove_device(isp->dev);
}
/*
 * isp_attach_iommu - Attach the ISP device to the IOMMU
 * @isp: OMAP3 ISP device
 *
 * Allocate an IOMMU group for the device, create an ARM DMA IOMMU mapping
 * covering [SZ_1G, SZ_1G + SZ_2G) and attach the device to it, so that the
 * ARM DMA mapping core can allocate I/O virtual addresses on the device's
 * behalf.
 *
 * Return: 0 on success or a negative error code on failure. On failure any
 * partially initialized state is released through isp_detach_iommu().
 */
static int isp_attach_iommu(struct isp_device *isp)
{
	struct dma_iommu_mapping *mapping;
	struct iommu_group *group;
	int ret;

	/* Create a device group and add the device to it. */
	group = iommu_group_alloc();
	if (IS_ERR(group)) {
		dev_err(isp->dev, "failed to allocate IOMMU group\n");
		return PTR_ERR(group);
	}

	ret = iommu_group_add_device(group, isp->dev);
	/* The group reference is no longer needed once the device holds one. */
	iommu_group_put(group);

	if (ret < 0) {
		/* Fixed typo in the original message: "IPMMU" -> "IOMMU". */
		dev_err(isp->dev, "failed to add device to IOMMU group\n");
		return ret;
	}

	/*
	 * Create the ARM mapping, used by the ARM DMA mapping core to allocate
	 * VAs. This will allocate a corresponding IOMMU domain.
	 */
	mapping = arm_iommu_create_mapping(&platform_bus_type, SZ_1G, SZ_2G);
	if (IS_ERR(mapping)) {
		dev_err(isp->dev, "failed to create ARM IOMMU mapping\n");
		ret = PTR_ERR(mapping);
		goto error;
	}

	isp->mapping = mapping;

	/* Attach the ARM VA mapping to the device. */
	ret = arm_iommu_attach_device(isp->dev, mapping);
	if (ret < 0) {
		dev_err(isp->dev, "failed to attach device to VA mapping\n");
		goto error;
	}

	return 0;

error:
	isp_detach_iommu(isp);
	return ret;
}
/* /*
* isp_remove - Remove ISP platform device * isp_remove - Remove ISP platform device
* @pdev: Pointer to ISP platform device * @pdev: Pointer to ISP platform device
...@@ -2135,10 +2197,8 @@ static int isp_remove(struct platform_device *pdev) ...@@ -2135,10 +2197,8 @@ static int isp_remove(struct platform_device *pdev)
isp_xclk_cleanup(isp); isp_xclk_cleanup(isp);
__omap3isp_get(isp, false); __omap3isp_get(isp, false);
iommu_detach_device(isp->domain, &pdev->dev); isp_detach_iommu(isp);
iommu_domain_free(isp->domain); __omap3isp_put(isp, false);
isp->domain = NULL;
omap3isp_put(isp);
return 0; return 0;
} }
...@@ -2265,39 +2325,32 @@ static int isp_probe(struct platform_device *pdev) ...@@ -2265,39 +2325,32 @@ static int isp_probe(struct platform_device *pdev)
} }
} }
isp->domain = iommu_domain_alloc(pdev->dev.bus); /* IOMMU */
if (!isp->domain) { ret = isp_attach_iommu(isp);
dev_err(isp->dev, "can't alloc iommu domain\n"); if (ret < 0) {
ret = -ENOMEM; dev_err(&pdev->dev, "unable to attach to IOMMU\n");
goto error_isp; goto error_isp;
} }
ret = iommu_attach_device(isp->domain, &pdev->dev);
if (ret) {
dev_err(&pdev->dev, "can't attach iommu device: %d\n", ret);
ret = -EPROBE_DEFER;
goto free_domain;
}
/* Interrupt */ /* Interrupt */
isp->irq_num = platform_get_irq(pdev, 0); isp->irq_num = platform_get_irq(pdev, 0);
if (isp->irq_num <= 0) { if (isp->irq_num <= 0) {
dev_err(isp->dev, "No IRQ resource\n"); dev_err(isp->dev, "No IRQ resource\n");
ret = -ENODEV; ret = -ENODEV;
goto detach_dev; goto error_iommu;
} }
if (devm_request_irq(isp->dev, isp->irq_num, isp_isr, IRQF_SHARED, if (devm_request_irq(isp->dev, isp->irq_num, isp_isr, IRQF_SHARED,
"OMAP3 ISP", isp)) { "OMAP3 ISP", isp)) {
dev_err(isp->dev, "Unable to request IRQ\n"); dev_err(isp->dev, "Unable to request IRQ\n");
ret = -EINVAL; ret = -EINVAL;
goto detach_dev; goto error_iommu;
} }
/* Entities */ /* Entities */
ret = isp_initialize_modules(isp); ret = isp_initialize_modules(isp);
if (ret < 0) if (ret < 0)
goto detach_dev; goto error_iommu;
ret = isp_register_entities(isp); ret = isp_register_entities(isp);
if (ret < 0) if (ret < 0)
...@@ -2310,14 +2363,11 @@ static int isp_probe(struct platform_device *pdev) ...@@ -2310,14 +2363,11 @@ static int isp_probe(struct platform_device *pdev)
error_modules: error_modules:
isp_cleanup_modules(isp); isp_cleanup_modules(isp);
detach_dev: error_iommu:
iommu_detach_device(isp->domain, &pdev->dev); isp_detach_iommu(isp);
free_domain:
iommu_domain_free(isp->domain);
isp->domain = NULL;
error_isp: error_isp:
isp_xclk_cleanup(isp); isp_xclk_cleanup(isp);
omap3isp_put(isp); __omap3isp_put(isp, false);
error: error:
mutex_destroy(&isp->isp_mutex); mutex_destroy(&isp->isp_mutex);
......
...@@ -45,8 +45,6 @@ ...@@ -45,8 +45,6 @@
#include "ispcsi2.h" #include "ispcsi2.h"
#include "ispccp2.h" #include "ispccp2.h"
#define IOMMU_FLAG (IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_8)
#define ISP_TOK_TERM 0xFFFFFFFF /* #define ISP_TOK_TERM 0xFFFFFFFF /*
* terminating token for ISP * terminating token for ISP
* modules reg list * modules reg list
...@@ -152,6 +150,7 @@ struct isp_xclk { ...@@ -152,6 +150,7 @@ struct isp_xclk {
* regions. * regions.
* @mmio_base_phys: Array with physical L4 bus addresses for ISP register * @mmio_base_phys: Array with physical L4 bus addresses for ISP register
* regions. * regions.
* @mapping: IOMMU mapping
* @stat_lock: Spinlock for handling statistics * @stat_lock: Spinlock for handling statistics
* @isp_mutex: Mutex for serializing requests to ISP. * @isp_mutex: Mutex for serializing requests to ISP.
* @stop_failure: Indicates that an entity failed to stop. * @stop_failure: Indicates that an entity failed to stop.
...@@ -171,7 +170,6 @@ struct isp_xclk { ...@@ -171,7 +170,6 @@ struct isp_xclk {
* @isp_res: Pointer to current settings for ISP Resizer. * @isp_res: Pointer to current settings for ISP Resizer.
* @isp_prev: Pointer to current settings for ISP Preview. * @isp_prev: Pointer to current settings for ISP Preview.
* @isp_ccdc: Pointer to current settings for ISP CCDC. * @isp_ccdc: Pointer to current settings for ISP CCDC.
* @iommu: Pointer to requested IOMMU instance for ISP.
* @platform_cb: ISP driver callback function pointers for platform code * @platform_cb: ISP driver callback function pointers for platform code
* *
* This structure is used to store the OMAP ISP Information. * This structure is used to store the OMAP ISP Information.
...@@ -189,6 +187,8 @@ struct isp_device { ...@@ -189,6 +187,8 @@ struct isp_device {
void __iomem *mmio_base[OMAP3_ISP_IOMEM_LAST]; void __iomem *mmio_base[OMAP3_ISP_IOMEM_LAST];
unsigned long mmio_base_phys[OMAP3_ISP_IOMEM_LAST]; unsigned long mmio_base_phys[OMAP3_ISP_IOMEM_LAST];
struct dma_iommu_mapping *mapping;
/* ISP Obj */ /* ISP Obj */
spinlock_t stat_lock; /* common lock for statistic drivers */ spinlock_t stat_lock; /* common lock for statistic drivers */
struct mutex isp_mutex; /* For handling ref_count field */ struct mutex isp_mutex; /* For handling ref_count field */
...@@ -219,8 +219,6 @@ struct isp_device { ...@@ -219,8 +219,6 @@ struct isp_device {
unsigned int sbl_resources; unsigned int sbl_resources;
unsigned int subclk_resources; unsigned int subclk_resources;
struct iommu_domain *domain;
}; };
#define v4l2_dev_to_isp_device(dev) \ #define v4l2_dev_to_isp_device(dev) \
......
...@@ -30,7 +30,6 @@ ...@@ -30,7 +30,6 @@
#include <linux/device.h> #include <linux/device.h>
#include <linux/dma-mapping.h> #include <linux/dma-mapping.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/omap-iommu.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <media/v4l2-event.h> #include <media/v4l2-event.h>
...@@ -206,7 +205,8 @@ static int ccdc_lsc_validate_config(struct isp_ccdc_device *ccdc, ...@@ -206,7 +205,8 @@ static int ccdc_lsc_validate_config(struct isp_ccdc_device *ccdc,
* ccdc_lsc_program_table - Program Lens Shading Compensation table address. * ccdc_lsc_program_table - Program Lens Shading Compensation table address.
* @ccdc: Pointer to ISP CCDC device. * @ccdc: Pointer to ISP CCDC device.
*/ */
static void ccdc_lsc_program_table(struct isp_ccdc_device *ccdc, u32 addr) static void ccdc_lsc_program_table(struct isp_ccdc_device *ccdc,
dma_addr_t addr)
{ {
isp_reg_writel(to_isp_device(ccdc), addr, isp_reg_writel(to_isp_device(ccdc), addr,
OMAP3_ISP_IOMEM_CCDC, ISPCCDC_LSC_TABLE_BASE); OMAP3_ISP_IOMEM_CCDC, ISPCCDC_LSC_TABLE_BASE);
...@@ -333,7 +333,7 @@ static int __ccdc_lsc_configure(struct isp_ccdc_device *ccdc, ...@@ -333,7 +333,7 @@ static int __ccdc_lsc_configure(struct isp_ccdc_device *ccdc,
return -EBUSY; return -EBUSY;
ccdc_lsc_setup_regs(ccdc, &req->config); ccdc_lsc_setup_regs(ccdc, &req->config);
ccdc_lsc_program_table(ccdc, req->table); ccdc_lsc_program_table(ccdc, req->table.dma);
return 0; return 0;
} }
...@@ -368,11 +368,12 @@ static void ccdc_lsc_free_request(struct isp_ccdc_device *ccdc, ...@@ -368,11 +368,12 @@ static void ccdc_lsc_free_request(struct isp_ccdc_device *ccdc,
if (req == NULL) if (req == NULL)
return; return;
if (req->iovm) if (req->table.addr) {
dma_unmap_sg(isp->dev, req->iovm->sgt->sgl, sg_free_table(&req->table.sgt);
req->iovm->sgt->nents, DMA_TO_DEVICE); dma_free_coherent(isp->dev, req->config.size, req->table.addr,
if (req->table) req->table.dma);
omap_iommu_vfree(isp->domain, isp->dev, req->table); }
kfree(req); kfree(req);
} }
...@@ -416,7 +417,6 @@ static int ccdc_lsc_config(struct isp_ccdc_device *ccdc, ...@@ -416,7 +417,6 @@ static int ccdc_lsc_config(struct isp_ccdc_device *ccdc,
struct isp_device *isp = to_isp_device(ccdc); struct isp_device *isp = to_isp_device(ccdc);
struct ispccdc_lsc_config_req *req; struct ispccdc_lsc_config_req *req;
unsigned long flags; unsigned long flags;
void *table;
u16 update; u16 update;
int ret; int ret;
...@@ -444,38 +444,31 @@ static int ccdc_lsc_config(struct isp_ccdc_device *ccdc, ...@@ -444,38 +444,31 @@ static int ccdc_lsc_config(struct isp_ccdc_device *ccdc,
req->enable = 1; req->enable = 1;
req->table = omap_iommu_vmalloc(isp->domain, isp->dev, 0, req->table.addr = dma_alloc_coherent(isp->dev, req->config.size,
req->config.size, IOMMU_FLAG); &req->table.dma,
if (IS_ERR_VALUE(req->table)) { GFP_KERNEL);
req->table = 0; if (req->table.addr == NULL) {
ret = -ENOMEM;
goto done;
}
req->iovm = omap_find_iovm_area(isp->dev, req->table);
if (req->iovm == NULL) {
ret = -ENOMEM; ret = -ENOMEM;
goto done; goto done;
} }
if (!dma_map_sg(isp->dev, req->iovm->sgt->sgl, ret = dma_get_sgtable(isp->dev, &req->table.sgt,
req->iovm->sgt->nents, DMA_TO_DEVICE)) { req->table.addr, req->table.dma,
ret = -ENOMEM; req->config.size);
req->iovm = NULL; if (ret < 0)
goto done; goto done;
}
dma_sync_sg_for_cpu(isp->dev, req->iovm->sgt->sgl, dma_sync_sg_for_cpu(isp->dev, req->table.sgt.sgl,
req->iovm->sgt->nents, DMA_TO_DEVICE); req->table.sgt.nents, DMA_TO_DEVICE);
table = omap_da_to_va(isp->dev, req->table); if (copy_from_user(req->table.addr, config->lsc,
if (copy_from_user(table, config->lsc, req->config.size)) { req->config.size)) {
ret = -EFAULT; ret = -EFAULT;
goto done; goto done;
} }
dma_sync_sg_for_device(isp->dev, req->iovm->sgt->sgl, dma_sync_sg_for_device(isp->dev, req->table.sgt.sgl,
req->iovm->sgt->nents, DMA_TO_DEVICE); req->table.sgt.nents, DMA_TO_DEVICE);
} }
spin_lock_irqsave(&ccdc->lsc.req_lock, flags); spin_lock_irqsave(&ccdc->lsc.req_lock, flags);
...@@ -584,7 +577,7 @@ static void ccdc_configure_fpc(struct isp_ccdc_device *ccdc) ...@@ -584,7 +577,7 @@ static void ccdc_configure_fpc(struct isp_ccdc_device *ccdc)
if (!ccdc->fpc_en) if (!ccdc->fpc_en)
return; return;
isp_reg_writel(isp, ccdc->fpc.fpcaddr, OMAP3_ISP_IOMEM_CCDC, isp_reg_writel(isp, ccdc->fpc.dma, OMAP3_ISP_IOMEM_CCDC,
ISPCCDC_FPC_ADDR); ISPCCDC_FPC_ADDR);
/* The FPNUM field must be set before enabling FPC. */ /* The FPNUM field must be set before enabling FPC. */
isp_reg_writel(isp, (ccdc->fpc.fpnum << ISPCCDC_FPC_FPNUM_SHIFT), isp_reg_writel(isp, (ccdc->fpc.fpnum << ISPCCDC_FPC_FPNUM_SHIFT),
...@@ -724,8 +717,9 @@ static int ccdc_config(struct isp_ccdc_device *ccdc, ...@@ -724,8 +717,9 @@ static int ccdc_config(struct isp_ccdc_device *ccdc,
ccdc->shadow_update = 0; ccdc->shadow_update = 0;
if (OMAP3ISP_CCDC_FPC & ccdc_struct->update) { if (OMAP3ISP_CCDC_FPC & ccdc_struct->update) {
u32 table_old = 0; struct omap3isp_ccdc_fpc fpc;
u32 table_new; struct ispccdc_fpc fpc_old = { .addr = NULL, };
struct ispccdc_fpc fpc_new;
u32 size; u32 size;
if (ccdc->state != ISP_PIPELINE_STREAM_STOPPED) if (ccdc->state != ISP_PIPELINE_STREAM_STOPPED)
...@@ -734,35 +728,39 @@ static int ccdc_config(struct isp_ccdc_device *ccdc, ...@@ -734,35 +728,39 @@ static int ccdc_config(struct isp_ccdc_device *ccdc,
ccdc->fpc_en = !!(OMAP3ISP_CCDC_FPC & ccdc_struct->flag); ccdc->fpc_en = !!(OMAP3ISP_CCDC_FPC & ccdc_struct->flag);
if (ccdc->fpc_en) { if (ccdc->fpc_en) {
if (copy_from_user(&ccdc->fpc, ccdc_struct->fpc, if (copy_from_user(&fpc, ccdc_struct->fpc, sizeof(fpc)))
sizeof(ccdc->fpc)))
return -EFAULT; return -EFAULT;
size = fpc.fpnum * 4;
/* /*
* table_new must be 64-bytes aligned, but it's * The table address must be 64-bytes aligned, which is
* already done by omap_iommu_vmalloc(). * guaranteed by dma_alloc_coherent().
*/ */
size = ccdc->fpc.fpnum * 4; fpc_new.fpnum = fpc.fpnum;
table_new = omap_iommu_vmalloc(isp->domain, isp->dev, fpc_new.addr = dma_alloc_coherent(isp->dev, size,
0, size, IOMMU_FLAG); &fpc_new.dma,
if (IS_ERR_VALUE(table_new)) GFP_KERNEL);
if (fpc_new.addr == NULL)
return -ENOMEM; return -ENOMEM;
if (copy_from_user(omap_da_to_va(isp->dev, table_new), if (copy_from_user(fpc_new.addr,
(__force void __user *) (__force void __user *)fpc.fpcaddr,
ccdc->fpc.fpcaddr, size)) { size)) {
omap_iommu_vfree(isp->domain, isp->dev, dma_free_coherent(isp->dev, size, fpc_new.addr,
table_new); fpc_new.dma);
return -EFAULT; return -EFAULT;
} }
table_old = ccdc->fpc.fpcaddr; fpc_old = ccdc->fpc;
ccdc->fpc.fpcaddr = table_new; ccdc->fpc = fpc_new;
} }
ccdc_configure_fpc(ccdc); ccdc_configure_fpc(ccdc);
if (table_old != 0)
omap_iommu_vfree(isp->domain, isp->dev, table_old); if (fpc_old.addr != NULL)
dma_free_coherent(isp->dev, fpc_old.fpnum * 4,
fpc_old.addr, fpc_old.dma);
} }
return ccdc_lsc_config(ccdc, ccdc_struct); return ccdc_lsc_config(ccdc, ccdc_struct);
...@@ -1523,7 +1521,7 @@ static int ccdc_isr_buffer(struct isp_ccdc_device *ccdc) ...@@ -1523,7 +1521,7 @@ static int ccdc_isr_buffer(struct isp_ccdc_device *ccdc)
buffer = omap3isp_video_buffer_next(&ccdc->video_out); buffer = omap3isp_video_buffer_next(&ccdc->video_out);
if (buffer != NULL) { if (buffer != NULL) {
ccdc_set_outaddr(ccdc, buffer->isp_addr); ccdc_set_outaddr(ccdc, buffer->dma);
restart = 1; restart = 1;
} }
...@@ -1662,7 +1660,7 @@ static int ccdc_video_queue(struct isp_video *video, struct isp_buffer *buffer) ...@@ -1662,7 +1660,7 @@ static int ccdc_video_queue(struct isp_video *video, struct isp_buffer *buffer)
if (!(ccdc->output & CCDC_OUTPUT_MEMORY)) if (!(ccdc->output & CCDC_OUTPUT_MEMORY))
return -ENODEV; return -ENODEV;
ccdc_set_outaddr(ccdc, buffer->isp_addr); ccdc_set_outaddr(ccdc, buffer->dma);
/* We now have a buffer queued on the output, restart the pipeline /* We now have a buffer queued on the output, restart the pipeline
* on the next CCDC interrupt if running in continuous mode (or when * on the next CCDC interrupt if running in continuous mode (or when
...@@ -2580,8 +2578,9 @@ void omap3isp_ccdc_cleanup(struct isp_device *isp) ...@@ -2580,8 +2578,9 @@ void omap3isp_ccdc_cleanup(struct isp_device *isp)
cancel_work_sync(&ccdc->lsc.table_work); cancel_work_sync(&ccdc->lsc.table_work);
ccdc_lsc_free_queue(ccdc, &ccdc->lsc.free_queue); ccdc_lsc_free_queue(ccdc, &ccdc->lsc.free_queue);
if (ccdc->fpc.fpcaddr != 0) if (ccdc->fpc.addr != NULL)
omap_iommu_vfree(isp->domain, isp->dev, ccdc->fpc.fpcaddr); dma_free_coherent(isp->dev, ccdc->fpc.fpnum * 4, ccdc->fpc.addr,
ccdc->fpc.dma);
mutex_destroy(&ccdc->ioctl_lock); mutex_destroy(&ccdc->ioctl_lock);
} }
...@@ -46,6 +46,12 @@ enum ccdc_input_entity { ...@@ -46,6 +46,12 @@ enum ccdc_input_entity {
#define OMAP3ISP_CCDC_NEVENTS 16 #define OMAP3ISP_CCDC_NEVENTS 16
/*
 * struct ispccdc_fpc - Faulty Pixels Correction table
 * @addr: Table CPU virtual address, allocated with dma_alloc_coherent()
 * @dma: Table DMA address, programmed into the ISPCCDC_FPC_ADDR register
 * @fpnum: Number of faulty pixel entries (each entry is 4 bytes, so the
 *	table size is fpnum * 4)
 */
struct ispccdc_fpc {
	void *addr;
	dma_addr_t dma;
	unsigned int fpnum;
};
enum ispccdc_lsc_state { enum ispccdc_lsc_state {
LSC_STATE_STOPPED = 0, LSC_STATE_STOPPED = 0,
LSC_STATE_STOPPING = 1, LSC_STATE_STOPPING = 1,
...@@ -57,8 +63,12 @@ struct ispccdc_lsc_config_req { ...@@ -57,8 +63,12 @@ struct ispccdc_lsc_config_req {
struct list_head list; struct list_head list;
struct omap3isp_ccdc_lsc_config config; struct omap3isp_ccdc_lsc_config config;
unsigned char enable; unsigned char enable;
u32 table;
struct iovm_struct *iovm; struct {
void *addr;
dma_addr_t dma;
struct sg_table sgt;
} table;
}; };
/* /*
...@@ -136,7 +146,7 @@ struct isp_ccdc_device { ...@@ -136,7 +146,7 @@ struct isp_ccdc_device {
fpc_en:1; fpc_en:1;
struct omap3isp_ccdc_blcomp blcomp; struct omap3isp_ccdc_blcomp blcomp;
struct omap3isp_ccdc_bclamp clamp; struct omap3isp_ccdc_bclamp clamp;
struct omap3isp_ccdc_fpc fpc; struct ispccdc_fpc fpc;
struct ispccdc_lsc lsc; struct ispccdc_lsc lsc;
unsigned int update; unsigned int update;
unsigned int shadow_update; unsigned int shadow_update;
......
...@@ -549,7 +549,7 @@ static void ccp2_isr_buffer(struct isp_ccp2_device *ccp2) ...@@ -549,7 +549,7 @@ static void ccp2_isr_buffer(struct isp_ccp2_device *ccp2)
buffer = omap3isp_video_buffer_next(&ccp2->video_in); buffer = omap3isp_video_buffer_next(&ccp2->video_in);
if (buffer != NULL) if (buffer != NULL)
ccp2_set_inaddr(ccp2, buffer->isp_addr); ccp2_set_inaddr(ccp2, buffer->dma);
pipe->state |= ISP_PIPELINE_IDLE_INPUT; pipe->state |= ISP_PIPELINE_IDLE_INPUT;
...@@ -940,7 +940,7 @@ static int ccp2_video_queue(struct isp_video *video, struct isp_buffer *buffer) ...@@ -940,7 +940,7 @@ static int ccp2_video_queue(struct isp_video *video, struct isp_buffer *buffer)
{ {
struct isp_ccp2_device *ccp2 = &video->isp->isp_ccp2; struct isp_ccp2_device *ccp2 = &video->isp->isp_ccp2;
ccp2_set_inaddr(ccp2, buffer->isp_addr); ccp2_set_inaddr(ccp2, buffer->dma);
return 0; return 0;
} }
......
...@@ -695,7 +695,7 @@ static void csi2_isr_buffer(struct isp_csi2_device *csi2) ...@@ -695,7 +695,7 @@ static void csi2_isr_buffer(struct isp_csi2_device *csi2)
if (buffer == NULL) if (buffer == NULL)
return; return;
csi2_set_outaddr(csi2, buffer->isp_addr); csi2_set_outaddr(csi2, buffer->dma);
csi2_ctx_enable(isp, csi2, 0, 1); csi2_ctx_enable(isp, csi2, 0, 1);
} }
...@@ -812,7 +812,7 @@ static int csi2_queue(struct isp_video *video, struct isp_buffer *buffer) ...@@ -812,7 +812,7 @@ static int csi2_queue(struct isp_video *video, struct isp_buffer *buffer)
struct isp_device *isp = video->isp; struct isp_device *isp = video->isp;
struct isp_csi2_device *csi2 = &isp->isp_csi2a; struct isp_csi2_device *csi2 = &isp->isp_csi2a;
csi2_set_outaddr(csi2, buffer->isp_addr); csi2_set_outaddr(csi2, buffer->dma);
/* /*
* If streaming was enabled before there was a buffer queued * If streaming was enabled before there was a buffer queued
......
...@@ -47,7 +47,7 @@ static void h3a_aewb_setup_regs(struct ispstat *aewb, void *priv) ...@@ -47,7 +47,7 @@ static void h3a_aewb_setup_regs(struct ispstat *aewb, void *priv)
if (aewb->state == ISPSTAT_DISABLED) if (aewb->state == ISPSTAT_DISABLED)
return; return;
isp_reg_writel(aewb->isp, aewb->active_buf->iommu_addr, isp_reg_writel(aewb->isp, aewb->active_buf->dma_addr,
OMAP3_ISP_IOMEM_H3A, ISPH3A_AEWBUFST); OMAP3_ISP_IOMEM_H3A, ISPH3A_AEWBUFST);
if (!aewb->update) if (!aewb->update)
......
...@@ -51,7 +51,7 @@ static void h3a_af_setup_regs(struct ispstat *af, void *priv) ...@@ -51,7 +51,7 @@ static void h3a_af_setup_regs(struct ispstat *af, void *priv)
if (af->state == ISPSTAT_DISABLED) if (af->state == ISPSTAT_DISABLED)
return; return;
isp_reg_writel(af->isp, af->active_buf->iommu_addr, OMAP3_ISP_IOMEM_H3A, isp_reg_writel(af->isp, af->active_buf->dma_addr, OMAP3_ISP_IOMEM_H3A,
ISPH3A_AFBUFST); ISPH3A_AFBUFST);
if (!af->update) if (!af->update)
......
...@@ -1499,14 +1499,14 @@ static void preview_isr_buffer(struct isp_prev_device *prev) ...@@ -1499,14 +1499,14 @@ static void preview_isr_buffer(struct isp_prev_device *prev)
if (prev->input == PREVIEW_INPUT_MEMORY) { if (prev->input == PREVIEW_INPUT_MEMORY) {
buffer = omap3isp_video_buffer_next(&prev->video_in); buffer = omap3isp_video_buffer_next(&prev->video_in);
if (buffer != NULL) if (buffer != NULL)
preview_set_inaddr(prev, buffer->isp_addr); preview_set_inaddr(prev, buffer->dma);
pipe->state |= ISP_PIPELINE_IDLE_INPUT; pipe->state |= ISP_PIPELINE_IDLE_INPUT;
} }
if (prev->output & PREVIEW_OUTPUT_MEMORY) { if (prev->output & PREVIEW_OUTPUT_MEMORY) {
buffer = omap3isp_video_buffer_next(&prev->video_out); buffer = omap3isp_video_buffer_next(&prev->video_out);
if (buffer != NULL) { if (buffer != NULL) {
preview_set_outaddr(prev, buffer->isp_addr); preview_set_outaddr(prev, buffer->dma);
restart = 1; restart = 1;
} }
pipe->state |= ISP_PIPELINE_IDLE_OUTPUT; pipe->state |= ISP_PIPELINE_IDLE_OUTPUT;
...@@ -1577,10 +1577,10 @@ static int preview_video_queue(struct isp_video *video, ...@@ -1577,10 +1577,10 @@ static int preview_video_queue(struct isp_video *video,
struct isp_prev_device *prev = &video->isp->isp_prev; struct isp_prev_device *prev = &video->isp->isp_prev;
if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
preview_set_inaddr(prev, buffer->isp_addr); preview_set_inaddr(prev, buffer->dma);
if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
preview_set_outaddr(prev, buffer->isp_addr); preview_set_outaddr(prev, buffer->dma);
return 0; return 0;
} }
......
This diff is collapsed.
/*
* ispqueue.h
*
* TI OMAP3 ISP - Video buffers queue handling
*
* Copyright (C) 2010 Nokia Corporation
*
* Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
* Sakari Ailus <sakari.ailus@iki.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*/
#ifndef OMAP3_ISP_QUEUE_H
#define OMAP3_ISP_QUEUE_H
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm_types.h>
#include <linux/mutex.h>
#include <linux/videodev2.h>
#include <linux/wait.h>
struct isp_video_queue;
struct page;
struct scatterlist;
#define ISP_VIDEO_MAX_BUFFERS 16
/**
 * enum isp_video_buffer_state - ISP video buffer state
 * @ISP_BUF_STATE_IDLE: The buffer is under userspace control (dequeued
 *	or not queued yet).
 * @ISP_BUF_STATE_QUEUED: The buffer has been queued but isn't used by the
 *	device yet.
 * @ISP_BUF_STATE_ACTIVE: The buffer is in use for an active video transfer.
 * @ISP_BUF_STATE_ERROR: The device is done with the buffer and an error
 *	occurred. For capture devices the buffer likely contains corrupted
 *	data or no data at all.
 * @ISP_BUF_STATE_DONE: The device is done with the buffer and no error
 *	occurred. For capture devices the buffer contains valid data.
 */
enum isp_video_buffer_state {
	ISP_BUF_STATE_IDLE,
	ISP_BUF_STATE_QUEUED,
	ISP_BUF_STATE_ACTIVE,
	ISP_BUF_STATE_ERROR,
	ISP_BUF_STATE_DONE,
};
/**
 * struct isp_video_buffer - ISP video buffer
 * @vma_use_count: Number of times the buffer is mmap'ed to userspace
 * @stream: List head for insertion into the main queue
 * @queue: ISP buffers queue this buffer belongs to
 * @prepared: Whether the buffer has been prepared
 * @skip_cache: Whether to skip cache management operations for this buffer
 * @vaddr: Memory virtual address (for kernel buffers)
 * @vm_flags: Buffer VMA flags (for userspace buffers)
 * @offset: Offset inside the first page (for userspace buffers)
 * @npages: Number of pages (for userspace buffers)
 * @pages: Pages table (for userspace non-VM_PFNMAP buffers)
 * @paddr: Memory physical address (for userspace VM_PFNMAP buffers)
 * @sglen: Number of elements in the scatter list (for non-VM_PFNMAP buffers)
 * @sglist: Scatter list (for non-VM_PFNMAP buffers)
 * @vbuf: V4L2 buffer exposed to userspace
 * @irqlist: List head for insertion into the IRQ queue
 * @state: Current buffer state
 * @wait: Wait queue to signal buffer completion
 */
struct isp_video_buffer {
	unsigned long vma_use_count;
	struct list_head stream;
	struct isp_video_queue *queue;
	unsigned int prepared:1;
	bool skip_cache;

	/* For kernel buffers. */
	void *vaddr;

	/* For userspace buffers. */
	vm_flags_t vm_flags;
	unsigned long offset;
	unsigned int npages;
	struct page **pages;
	dma_addr_t paddr;

	/* For all buffers except VM_PFNMAP. */
	unsigned int sglen;
	struct scatterlist *sglist;

	/* Touched by the interrupt handler. */
	struct v4l2_buffer vbuf;
	struct list_head irqlist;
	enum isp_video_buffer_state state;
	wait_queue_head_t wait;
};
/*
 * to_isp_video_buffer - Get the isp_video_buffer embedding a v4l2_buffer
 * @vb: Pointer to the vbuf member of a struct isp_video_buffer
 *
 * The embedded struct v4l2_buffer member is named 'vbuf'; the original macro
 * named the member 'vb' (the macro parameter), which does not exist in the
 * struct and would fail to compile if the macro were ever expanded.
 */
#define to_isp_video_buffer(vb) container_of(vb, struct isp_video_buffer, vbuf)
/**
 * struct isp_video_queue_operations - Driver-specific queue operations
 * @queue_prepare: Called before allocating buffers. Drivers should clamp the
 *	number of buffers according to their requirements, and must return the
 *	buffer size in bytes.
 * @buffer_prepare: Called the first time a buffer is queued, or after
 *	changing the userspace memory address for a USERPTR buffer, with the
 *	queue lock held. Drivers should perform device-specific buffer
 *	preparation (such as mapping the buffer memory in an IOMMU). This
 *	operation is optional.
 * @buffer_queue: Called when a buffer is being added to the queue with the
 *	queue irqlock spinlock held.
 * @buffer_cleanup: Called before freeing buffers, or before changing the
 *	userspace memory address for a USERPTR buffer, with the queue lock
 *	held. Drivers must perform cleanup operations required to undo the
 *	buffer_prepare call. This operation is optional.
 */
struct isp_video_queue_operations {
	void (*queue_prepare)(struct isp_video_queue *queue,
			      unsigned int *nbuffers, unsigned int *size);
	int (*buffer_prepare)(struct isp_video_buffer *buf);
	void (*buffer_queue)(struct isp_video_buffer *buf);
	void (*buffer_cleanup)(struct isp_video_buffer *buf);
};
/**
 * struct isp_video_queue - ISP video buffers queue
 * @type: Type of video buffers handled by this queue
 * @ops: Queue operations
 * @dev: Device used for DMA operations
 * @bufsize: Size of a driver-specific buffer object
 * @count: Number of currently allocated buffers
 * @buffers: ISP video buffers (at most ISP_VIDEO_MAX_BUFFERS)
 * @lock: Mutex to protect access to the buffers, main queue and state
 * @irqlock: Spinlock to protect access to the IRQ queue
 * @streaming: Queue state, indicates whether the queue is streaming
 * @queue: List of all queued buffers
 */
struct isp_video_queue {
	enum v4l2_buf_type type;
	const struct isp_video_queue_operations *ops;
	struct device *dev;
	unsigned int bufsize;

	unsigned int count;
	struct isp_video_buffer *buffers[ISP_VIDEO_MAX_BUFFERS];
	struct mutex lock;
	spinlock_t irqlock;

	unsigned int streaming:1;

	struct list_head queue;
};
/*
 * Queue API - the functions below implement the V4L2 buffer queue operations
 * on top of struct isp_video_queue. Unless noted otherwise they return 0 on
 * success or a negative error code on failure (implementations are outside
 * this header - see ispqueue.c).
 */
int omap3isp_video_queue_cleanup(struct isp_video_queue *queue);
int omap3isp_video_queue_init(struct isp_video_queue *queue,
			      enum v4l2_buf_type type,
			      const struct isp_video_queue_operations *ops,
			      struct device *dev, unsigned int bufsize);
/* VIDIOC_REQBUFS backend: (re)allocate @rb->count buffer objects. */
int omap3isp_video_queue_reqbufs(struct isp_video_queue *queue,
				 struct v4l2_requestbuffers *rb);
/* VIDIOC_QUERYBUF backend: fill @vbuf with the buffer state. */
int omap3isp_video_queue_querybuf(struct isp_video_queue *queue,
				  struct v4l2_buffer *vbuf);
/* VIDIOC_QBUF backend: queue the buffer described by @vbuf. */
int omap3isp_video_queue_qbuf(struct isp_video_queue *queue,
			      struct v4l2_buffer *vbuf);
/* VIDIOC_DQBUF backend: dequeue a done buffer, optionally non-blocking. */
int omap3isp_video_queue_dqbuf(struct isp_video_queue *queue,
			       struct v4l2_buffer *vbuf, int nonblocking);
int omap3isp_video_queue_streamon(struct isp_video_queue *queue);
void omap3isp_video_queue_streamoff(struct isp_video_queue *queue);
/* Mark all done buffers as erroneous, e.g. across suspend/resume. */
void omap3isp_video_queue_discard_done(struct isp_video_queue *queue);
int omap3isp_video_queue_mmap(struct isp_video_queue *queue,
			      struct vm_area_struct *vma);
/* Returns poll(2) event mask for @file (POLLIN/POLLERR style bits). */
unsigned int omap3isp_video_queue_poll(struct isp_video_queue *queue,
				       struct file *file, poll_table *wait);

#endif /* OMAP3_ISP_QUEUE_H */
...@@ -1040,7 +1040,7 @@ static void resizer_isr_buffer(struct isp_res_device *res) ...@@ -1040,7 +1040,7 @@ static void resizer_isr_buffer(struct isp_res_device *res)
*/ */
buffer = omap3isp_video_buffer_next(&res->video_out); buffer = omap3isp_video_buffer_next(&res->video_out);
if (buffer != NULL) { if (buffer != NULL) {
resizer_set_outaddr(res, buffer->isp_addr); resizer_set_outaddr(res, buffer->dma);
restart = 1; restart = 1;
} }
...@@ -1049,7 +1049,7 @@ static void resizer_isr_buffer(struct isp_res_device *res) ...@@ -1049,7 +1049,7 @@ static void resizer_isr_buffer(struct isp_res_device *res)
if (res->input == RESIZER_INPUT_MEMORY) { if (res->input == RESIZER_INPUT_MEMORY) {
buffer = omap3isp_video_buffer_next(&res->video_in); buffer = omap3isp_video_buffer_next(&res->video_in);
if (buffer != NULL) if (buffer != NULL)
resizer_set_inaddr(res, buffer->isp_addr); resizer_set_inaddr(res, buffer->dma);
pipe->state |= ISP_PIPELINE_IDLE_INPUT; pipe->state |= ISP_PIPELINE_IDLE_INPUT;
} }
...@@ -1101,7 +1101,7 @@ static int resizer_video_queue(struct isp_video *video, ...@@ -1101,7 +1101,7 @@ static int resizer_video_queue(struct isp_video *video,
struct isp_res_device *res = &video->isp->isp_res; struct isp_res_device *res = &video->isp->isp_res;
if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
resizer_set_inaddr(res, buffer->isp_addr); resizer_set_inaddr(res, buffer->dma);
/* /*
* We now have a buffer queued on the output. Despite what the * We now have a buffer queued on the output. Despite what the
...@@ -1116,7 +1116,7 @@ static int resizer_video_queue(struct isp_video *video, ...@@ -1116,7 +1116,7 @@ static int resizer_video_queue(struct isp_video *video,
* continuous mode or when starting the stream. * continuous mode or when starting the stream.
*/ */
if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
resizer_set_outaddr(res, buffer->isp_addr); resizer_set_outaddr(res, buffer->dma);
return 0; return 0;
} }
......
...@@ -26,13 +26,12 @@ ...@@ -26,13 +26,12 @@
*/ */
#include <linux/dma-mapping.h> #include <linux/dma-mapping.h>
#include <linux/omap-iommu.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include "isp.h" #include "isp.h"
#define IS_COHERENT_BUF(stat) ((stat)->dma_ch >= 0) #define ISP_STAT_USES_DMAENGINE(stat) ((stat)->dma_ch >= 0)
/* /*
* MAGIC_SIZE must always be the greatest common divisor of * MAGIC_SIZE must always be the greatest common divisor of
...@@ -77,21 +76,10 @@ static void __isp_stat_buf_sync_magic(struct ispstat *stat, ...@@ -77,21 +76,10 @@ static void __isp_stat_buf_sync_magic(struct ispstat *stat,
dma_addr_t, unsigned long, size_t, dma_addr_t, unsigned long, size_t,
enum dma_data_direction)) enum dma_data_direction))
{ {
struct device *dev = stat->isp->dev; /* Sync the initial and final magic words. */
struct page *pg; dma_sync(stat->isp->dev, buf->dma_addr, 0, MAGIC_SIZE, dir);
dma_addr_t dma_addr; dma_sync(stat->isp->dev, buf->dma_addr + (buf_size & PAGE_MASK),
u32 offset; buf_size & ~PAGE_MASK, MAGIC_SIZE, dir);
/* Initial magic words */
pg = vmalloc_to_page(buf->virt_addr);
dma_addr = pfn_to_dma(dev, page_to_pfn(pg));
dma_sync(dev, dma_addr, 0, MAGIC_SIZE, dir);
/* Final magic words */
pg = vmalloc_to_page(buf->virt_addr + buf_size);
dma_addr = pfn_to_dma(dev, page_to_pfn(pg));
offset = ((u32)buf->virt_addr + buf_size) & ~PAGE_MASK;
dma_sync(dev, dma_addr, offset, MAGIC_SIZE, dir);
} }
static void isp_stat_buf_sync_magic_for_device(struct ispstat *stat, static void isp_stat_buf_sync_magic_for_device(struct ispstat *stat,
...@@ -99,7 +87,7 @@ static void isp_stat_buf_sync_magic_for_device(struct ispstat *stat, ...@@ -99,7 +87,7 @@ static void isp_stat_buf_sync_magic_for_device(struct ispstat *stat,
u32 buf_size, u32 buf_size,
enum dma_data_direction dir) enum dma_data_direction dir)
{ {
if (IS_COHERENT_BUF(stat)) if (ISP_STAT_USES_DMAENGINE(stat))
return; return;
__isp_stat_buf_sync_magic(stat, buf, buf_size, dir, __isp_stat_buf_sync_magic(stat, buf, buf_size, dir,
...@@ -111,7 +99,7 @@ static void isp_stat_buf_sync_magic_for_cpu(struct ispstat *stat, ...@@ -111,7 +99,7 @@ static void isp_stat_buf_sync_magic_for_cpu(struct ispstat *stat,
u32 buf_size, u32 buf_size,
enum dma_data_direction dir) enum dma_data_direction dir)
{ {
if (IS_COHERENT_BUF(stat)) if (ISP_STAT_USES_DMAENGINE(stat))
return; return;
__isp_stat_buf_sync_magic(stat, buf, buf_size, dir, __isp_stat_buf_sync_magic(stat, buf, buf_size, dir,
...@@ -180,21 +168,21 @@ static void isp_stat_buf_insert_magic(struct ispstat *stat, ...@@ -180,21 +168,21 @@ static void isp_stat_buf_insert_magic(struct ispstat *stat,
static void isp_stat_buf_sync_for_device(struct ispstat *stat, static void isp_stat_buf_sync_for_device(struct ispstat *stat,
struct ispstat_buffer *buf) struct ispstat_buffer *buf)
{ {
if (IS_COHERENT_BUF(stat)) if (ISP_STAT_USES_DMAENGINE(stat))
return; return;
dma_sync_sg_for_device(stat->isp->dev, buf->iovm->sgt->sgl, dma_sync_sg_for_device(stat->isp->dev, buf->sgt.sgl,
buf->iovm->sgt->nents, DMA_FROM_DEVICE); buf->sgt.nents, DMA_FROM_DEVICE);
} }
static void isp_stat_buf_sync_for_cpu(struct ispstat *stat, static void isp_stat_buf_sync_for_cpu(struct ispstat *stat,
struct ispstat_buffer *buf) struct ispstat_buffer *buf)
{ {
if (IS_COHERENT_BUF(stat)) if (ISP_STAT_USES_DMAENGINE(stat))
return; return;
dma_sync_sg_for_cpu(stat->isp->dev, buf->iovm->sgt->sgl, dma_sync_sg_for_cpu(stat->isp->dev, buf->sgt.sgl,
buf->iovm->sgt->nents, DMA_FROM_DEVICE); buf->sgt.nents, DMA_FROM_DEVICE);
} }
static void isp_stat_buf_clear(struct ispstat *stat) static void isp_stat_buf_clear(struct ispstat *stat)
...@@ -354,29 +342,21 @@ static struct ispstat_buffer *isp_stat_buf_get(struct ispstat *stat, ...@@ -354,29 +342,21 @@ static struct ispstat_buffer *isp_stat_buf_get(struct ispstat *stat,
static void isp_stat_bufs_free(struct ispstat *stat) static void isp_stat_bufs_free(struct ispstat *stat)
{ {
struct isp_device *isp = stat->isp; struct device *dev = ISP_STAT_USES_DMAENGINE(stat)
int i; ? NULL : stat->isp->dev;
unsigned int i;
for (i = 0; i < STAT_MAX_BUFS; i++) { for (i = 0; i < STAT_MAX_BUFS; i++) {
struct ispstat_buffer *buf = &stat->buf[i]; struct ispstat_buffer *buf = &stat->buf[i];
if (!IS_COHERENT_BUF(stat)) { if (!buf->virt_addr)
if (IS_ERR_OR_NULL((void *)buf->iommu_addr)) continue;
continue;
if (buf->iovm) sg_free_table(&buf->sgt);
dma_unmap_sg(isp->dev, buf->iovm->sgt->sgl,
buf->iovm->sgt->nents, dma_free_coherent(dev, stat->buf_alloc_size, buf->virt_addr,
DMA_FROM_DEVICE); buf->dma_addr);
omap_iommu_vfree(isp->domain, isp->dev,
buf->iommu_addr);
} else {
if (!buf->virt_addr)
continue;
dma_free_coherent(stat->isp->dev, stat->buf_alloc_size,
buf->virt_addr, buf->dma_addr);
}
buf->iommu_addr = 0;
buf->iovm = NULL;
buf->dma_addr = 0; buf->dma_addr = 0;
buf->virt_addr = NULL; buf->virt_addr = NULL;
buf->empty = 1; buf->empty = 1;
...@@ -389,83 +369,51 @@ static void isp_stat_bufs_free(struct ispstat *stat) ...@@ -389,83 +369,51 @@ static void isp_stat_bufs_free(struct ispstat *stat)
stat->active_buf = NULL; stat->active_buf = NULL;
} }
static int isp_stat_bufs_alloc_iommu(struct ispstat *stat, unsigned int size) static int isp_stat_bufs_alloc_one(struct device *dev,
{ struct ispstat_buffer *buf,
struct isp_device *isp = stat->isp; unsigned int size)
int i;
stat->buf_alloc_size = size;
for (i = 0; i < STAT_MAX_BUFS; i++) {
struct ispstat_buffer *buf = &stat->buf[i];
struct iovm_struct *iovm;
WARN_ON(buf->dma_addr);
buf->iommu_addr = omap_iommu_vmalloc(isp->domain, isp->dev, 0,
size, IOMMU_FLAG);
if (IS_ERR((void *)buf->iommu_addr)) {
dev_err(stat->isp->dev,
"%s: Can't acquire memory for "
"buffer %d\n", stat->subdev.name, i);
isp_stat_bufs_free(stat);
return -ENOMEM;
}
iovm = omap_find_iovm_area(isp->dev, buf->iommu_addr);
if (!iovm ||
!dma_map_sg(isp->dev, iovm->sgt->sgl, iovm->sgt->nents,
DMA_FROM_DEVICE)) {
isp_stat_bufs_free(stat);
return -ENOMEM;
}
buf->iovm = iovm;
buf->virt_addr = omap_da_to_va(stat->isp->dev,
(u32)buf->iommu_addr);
buf->empty = 1;
dev_dbg(stat->isp->dev, "%s: buffer[%d] allocated."
"iommu_addr=0x%08lx virt_addr=0x%08lx",
stat->subdev.name, i, buf->iommu_addr,
(unsigned long)buf->virt_addr);
}
return 0;
}
static int isp_stat_bufs_alloc_dma(struct ispstat *stat, unsigned int size)
{ {
int i; int ret;
stat->buf_alloc_size = size;
for (i = 0; i < STAT_MAX_BUFS; i++) {
struct ispstat_buffer *buf = &stat->buf[i];
WARN_ON(buf->iommu_addr);
buf->virt_addr = dma_alloc_coherent(stat->isp->dev, size,
&buf->dma_addr, GFP_KERNEL | GFP_DMA);
if (!buf->virt_addr || !buf->dma_addr) { buf->virt_addr = dma_alloc_coherent(dev, size, &buf->dma_addr,
dev_info(stat->isp->dev, GFP_KERNEL | GFP_DMA);
"%s: Can't acquire memory for " if (!buf->virt_addr)
"DMA buffer %d\n", stat->subdev.name, i); return -ENOMEM;
isp_stat_bufs_free(stat);
return -ENOMEM;
}
buf->empty = 1;
dev_dbg(stat->isp->dev, "%s: buffer[%d] allocated." ret = dma_get_sgtable(dev, &buf->sgt, buf->virt_addr, buf->dma_addr,
"dma_addr=0x%08lx virt_addr=0x%08lx\n", size);
stat->subdev.name, i, (unsigned long)buf->dma_addr, if (ret < 0) {
(unsigned long)buf->virt_addr); dma_free_coherent(dev, size, buf->virt_addr, buf->dma_addr);
buf->virt_addr = NULL;
buf->dma_addr = 0;
return ret;
} }
return 0; return 0;
} }
/*
* The device passed to the DMA API depends on whether the statistics block uses
* ISP DMA, external DMA or PIO to transfer data.
*
* The first case (for the AEWB and AF engines) passes the ISP device, resulting
* in the DMA buffers being mapped through the ISP IOMMU.
*
* The second case (for the histogram engine) should pass the DMA engine device.
* As that device isn't accessible through the OMAP DMA engine API the driver
* passes NULL instead, resulting in the buffers being mapped directly as
* physical pages.
*
* The third case (for the histogram engine) doesn't require any mapping. The
* buffers could be allocated with kmalloc/vmalloc, but we still use
* dma_alloc_coherent() for consistency purpose.
*/
static int isp_stat_bufs_alloc(struct ispstat *stat, u32 size) static int isp_stat_bufs_alloc(struct ispstat *stat, u32 size)
{ {
struct device *dev = ISP_STAT_USES_DMAENGINE(stat)
? NULL : stat->isp->dev;
unsigned long flags; unsigned long flags;
unsigned int i;
spin_lock_irqsave(&stat->isp->stat_lock, flags); spin_lock_irqsave(&stat->isp->stat_lock, flags);
...@@ -489,10 +437,31 @@ static int isp_stat_bufs_alloc(struct ispstat *stat, u32 size) ...@@ -489,10 +437,31 @@ static int isp_stat_bufs_alloc(struct ispstat *stat, u32 size)
isp_stat_bufs_free(stat); isp_stat_bufs_free(stat);
if (IS_COHERENT_BUF(stat)) stat->buf_alloc_size = size;
return isp_stat_bufs_alloc_dma(stat, size);
else for (i = 0; i < STAT_MAX_BUFS; i++) {
return isp_stat_bufs_alloc_iommu(stat, size); struct ispstat_buffer *buf = &stat->buf[i];
int ret;
ret = isp_stat_bufs_alloc_one(dev, buf, size);
if (ret < 0) {
dev_err(stat->isp->dev,
"%s: Failed to allocate DMA buffer %u\n",
stat->subdev.name, i);
isp_stat_bufs_free(stat);
return ret;
}
buf->empty = 1;
dev_dbg(stat->isp->dev,
"%s: buffer[%u] allocated. dma=0x%08lx virt=0x%08lx",
stat->subdev.name, i,
(unsigned long)buf->dma_addr,
(unsigned long)buf->virt_addr);
}
return 0;
} }
static void isp_stat_queue_event(struct ispstat *stat, int err) static void isp_stat_queue_event(struct ispstat *stat, int err)
......
...@@ -46,8 +46,7 @@ ...@@ -46,8 +46,7 @@
struct ispstat; struct ispstat;
struct ispstat_buffer { struct ispstat_buffer {
unsigned long iommu_addr; struct sg_table sgt;
struct iovm_struct *iovm;
void *virt_addr; void *virt_addr;
dma_addr_t dma_addr; dma_addr_t dma_addr;
struct timespec ts; struct timespec ts;
......
This diff is collapsed.
...@@ -30,8 +30,7 @@ ...@@ -30,8 +30,7 @@
#include <media/media-entity.h> #include <media/media-entity.h>
#include <media/v4l2-dev.h> #include <media/v4l2-dev.h>
#include <media/v4l2-fh.h> #include <media/v4l2-fh.h>
#include <media/videobuf2-core.h>
#include "ispqueue.h"
#define ISP_VIDEO_DRIVER_NAME "ispvideo" #define ISP_VIDEO_DRIVER_NAME "ispvideo"
#define ISP_VIDEO_DRIVER_VERSION "0.0.2" #define ISP_VIDEO_DRIVER_VERSION "0.0.2"
...@@ -124,17 +123,19 @@ static inline int isp_pipeline_ready(struct isp_pipeline *pipe) ...@@ -124,17 +123,19 @@ static inline int isp_pipeline_ready(struct isp_pipeline *pipe)
ISP_PIPELINE_IDLE_OUTPUT); ISP_PIPELINE_IDLE_OUTPUT);
} }
/* /**
* struct isp_buffer - ISP buffer * struct isp_buffer - ISP video buffer
* @buffer: ISP video buffer * @vb: videobuf2 buffer
* @isp_addr: MMU mapped address (a.k.a. device address) of the buffer. * @irqlist: List head for insertion into IRQ queue
* @dma: DMA address
*/ */
struct isp_buffer { struct isp_buffer {
struct isp_video_buffer buffer; struct vb2_buffer vb;
dma_addr_t isp_addr; struct list_head irqlist;
dma_addr_t dma;
}; };
#define to_isp_buffer(buf) container_of(buf, struct isp_buffer, buffer) #define to_isp_buffer(buf) container_of(buf, struct isp_buffer, vb)
enum isp_video_dmaqueue_flags { enum isp_video_dmaqueue_flags {
/* Set if DMA queue becomes empty when ISP_PIPELINE_STREAM_CONTINUOUS */ /* Set if DMA queue becomes empty when ISP_PIPELINE_STREAM_CONTINUOUS */
...@@ -172,16 +173,16 @@ struct isp_video { ...@@ -172,16 +173,16 @@ struct isp_video {
unsigned int bpl_value; /* bytes per line value */ unsigned int bpl_value; /* bytes per line value */
unsigned int bpl_padding; /* padding at end of line */ unsigned int bpl_padding; /* padding at end of line */
/* Entity video node streaming */
unsigned int streaming:1;
/* Pipeline state */ /* Pipeline state */
struct isp_pipeline pipe; struct isp_pipeline pipe;
struct mutex stream_lock; /* pipeline and stream states */ struct mutex stream_lock; /* pipeline and stream states */
bool error; bool error;
/* Video buffers queue */ /* Video buffers queue */
struct isp_video_queue *queue; void *alloc_ctx;
struct vb2_queue *queue;
struct mutex queue_lock; /* protects the queue */
spinlock_t irqlock; /* protects dmaqueue */
struct list_head dmaqueue; struct list_head dmaqueue;
enum isp_video_dmaqueue_flags dmaqueue_flags; enum isp_video_dmaqueue_flags dmaqueue_flags;
...@@ -193,7 +194,7 @@ struct isp_video { ...@@ -193,7 +194,7 @@ struct isp_video {
struct isp_video_fh { struct isp_video_fh {
struct v4l2_fh vfh; struct v4l2_fh vfh;
struct isp_video *video; struct isp_video *video;
struct isp_video_queue queue; struct vb2_queue queue;
struct v4l2_format format; struct v4l2_format format;
struct v4l2_fract timeperframe; struct v4l2_fract timeperframe;
}; };
......
...@@ -1199,6 +1199,30 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state) ...@@ -1199,6 +1199,30 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
} }
EXPORT_SYMBOL_GPL(vb2_buffer_done); EXPORT_SYMBOL_GPL(vb2_buffer_done);
/**
* vb2_discard_done() - discard all buffers marked as DONE
* @q: videobuf2 queue
*
* This function is intended to be used with suspend/resume operations. It
* discards all 'done' buffers as they would be too old to be requested after
* resume.
*
* Drivers must stop the hardware and synchronize with interrupt handlers and/or
* delayed works before calling this function to make sure no buffer will be
* touched by the driver and/or hardware.
*/
void vb2_discard_done(struct vb2_queue *q)
{
struct vb2_buffer *vb;
unsigned long flags;
spin_lock_irqsave(&q->done_lock, flags);
list_for_each_entry(vb, &q->done_list, done_entry)
vb->state = VB2_BUF_STATE_ERROR;
spin_unlock_irqrestore(&q->done_lock, flags);
}
EXPORT_SYMBOL_GPL(vb2_discard_done);
/** /**
* __fill_vb2_buffer() - fill a vb2_buffer with information provided in a * __fill_vb2_buffer() - fill a vb2_buffer with information provided in a
* v4l2_buffer by the userspace. The caller has already verified that struct * v4l2_buffer by the userspace. The caller has already verified that struct
......
...@@ -396,7 +396,7 @@ static void iss_video_buf_queue(struct vb2_buffer *vb) ...@@ -396,7 +396,7 @@ static void iss_video_buf_queue(struct vb2_buffer *vb)
} }
} }
static struct vb2_ops iss_video_vb2ops = { static const struct vb2_ops iss_video_vb2ops = {
.queue_setup = iss_video_queue_setup, .queue_setup = iss_video_queue_setup,
.buf_prepare = iss_video_buf_prepare, .buf_prepare = iss_video_buf_prepare,
.buf_queue = iss_video_buf_queue, .buf_queue = iss_video_buf_queue,
......
...@@ -432,6 +432,7 @@ void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no); ...@@ -432,6 +432,7 @@ void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no);
void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no); void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no);
void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state); void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state);
void vb2_discard_done(struct vb2_queue *q);
int vb2_wait_for_all_buffers(struct vb2_queue *q); int vb2_wait_for_all_buffers(struct vb2_queue *q);
int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b); int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment