Commit 90612317 authored by Hans de Goede, committed by Mauro Carvalho Chehab

media: atomisp: drop unused ATOMISP_MAP_FLAG_* flags

Drop the ATOMISP_MAP_FLAG_CACHED flag, which is never set anywhere;
also drop the matching "cached" parameter of hmm[_bo]_alloc, whose
value was derived from the never-set flag.

Also drop the ATOMISP_MAP_FLAG_NOFLUSH flag; it is not used anywhere.
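For callers the visible change is only the shorter hmm_bo_alloc_pages()
prototype; behaviour is unchanged, since the flag was never set. A minimal
before/after sketch of a private ISP buffer allocation (bo setup and error
unwinding elided; the from_highmem and userptr argument values are
illustrative only, not taken from any existing caller):

	/* before this patch: callers had to pass a "cached" value that was never true */
	ret = hmm_bo_alloc_pages(bo, HMM_BO_PRIVATE, 0, NULL, false);

	/* after this patch: the parameter is gone; private pages are always mapped uncacheable */
	ret = hmm_bo_alloc_pages(bo, HMM_BO_PRIVATE, 0, NULL);
	if (ret)
		dev_err(atomisp_dev, "hmm_bo_alloc_pages failed.\n");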

Link: https://lore.kernel.org/linux-media/20220615205037.16549-27-hdegoede@redhat.com
Reviewed-by: Andy Shevchenko <andy.shevchenko@gmail.com>
Signed-off-by: Hans de Goede <hdegoede@redhat.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@kernel.org>
parent 9f40d3a7
@@ -221,7 +221,7 @@ int hmm_bo_allocated(struct hmm_buffer_object *bo);
  */
 int hmm_bo_alloc_pages(struct hmm_buffer_object *bo,
 		       enum hmm_bo_type type, int from_highmem,
-		       const void __user *userptr, bool cached);
+		       const void __user *userptr);
 void hmm_bo_free_pages(struct hmm_buffer_object *bo);
 int hmm_bo_page_allocated(struct hmm_buffer_object *bo);
...
@@ -825,9 +825,6 @@ struct atomisp_s_runmode {
 	__u32 mode;
 };
 
-#define ATOMISP_MAP_FLAG_NOFLUSH	0x0001	/* Do not flush cache */
-#define ATOMISP_MAP_FLAG_CACHED		0x0002	/* Enable cache */
-
 struct atomisp_update_exposure {
 	unsigned int gain;
 	unsigned int digi_gain;
...
@@ -174,7 +174,6 @@ ia_css_ptr hmm_alloc(size_t bytes, enum hmm_bo_type type,
 {
 	unsigned int pgnr;
 	struct hmm_buffer_object *bo;
-	bool cached = attrs & ATOMISP_MAP_FLAG_CACHED;
 	int ret;
 
 	/*
@@ -195,7 +194,7 @@ ia_css_ptr hmm_alloc(size_t bytes, enum hmm_bo_type type,
 	}
 
 	/* Allocate pages for memory */
-	ret = hmm_bo_alloc_pages(bo, type, from_highmem, userptr, cached);
+	ret = hmm_bo_alloc_pages(bo, type, from_highmem, userptr);
 	if (ret) {
 		dev_err(atomisp_dev, "hmm_bo_alloc_pages failed.\n");
 		goto alloc_page_err;
@@ -209,8 +208,8 @@ ia_css_ptr hmm_alloc(size_t bytes, enum hmm_bo_type type,
 	}
 
 	dev_dbg(atomisp_dev,
-		"%s: pages: 0x%08x (%zu bytes), type: %d from highmem %d, user ptr %p, cached %d\n",
-		__func__, bo->start, bytes, type, from_highmem, userptr, cached);
+		"%s: pages: 0x%08x (%zu bytes), type: %d from highmem %d, user ptr %p\n",
+		__func__, bo->start, bytes, type, from_highmem, userptr);
 
 	return bo->start;
...
@@ -651,8 +651,7 @@ static void free_private_bo_pages(struct hmm_buffer_object *bo,
 
 /*Allocate pages which will be used only by ISP*/
 static int alloc_private_pages(struct hmm_buffer_object *bo,
-			       int from_highmem,
-			       bool cached)
+			       int from_highmem)
 {
 	int ret;
 	unsigned int pgnr, order, blk_pgnr, alloc_pgnr;
@@ -730,7 +729,6 @@ static int alloc_private_pages(struct hmm_buffer_object *bo,
 		} else {
 			blk_pgnr = order_to_nr(order);
 
-			if (!cached) {
 			/*
 			 * set memory to uncacheable -- UC_MINUS
 			 */
@@ -743,7 +741,6 @@ static int alloc_private_pages(struct hmm_buffer_object *bo,
 				goto cleanup;
 			}
-			}
 
 			for (j = 0; j < blk_pgnr; j++, i++) {
 				bo->page_obj[i].page = pages + j;
@@ -797,7 +794,7 @@ static void free_user_pages(struct hmm_buffer_object *bo,
  * Convert user space virtual address into pages list
  */
 static int alloc_user_pages(struct hmm_buffer_object *bo,
-			    const void __user *userptr, bool cached)
+			    const void __user *userptr)
 {
 	int page_nr;
 	int i;
@@ -895,7 +892,7 @@ static int alloc_user_pages(struct hmm_buffer_object *bo,
  */
 int hmm_bo_alloc_pages(struct hmm_buffer_object *bo,
 		       enum hmm_bo_type type, int from_highmem,
-		       const void __user *userptr, bool cached)
+		       const void __user *userptr)
 {
 	int ret = -EINVAL;
 
@@ -909,9 +906,9 @@ int hmm_bo_alloc_pages(struct hmm_buffer_object *bo,
 	 * add HMM_BO_USER type
 	 */
 	if (type == HMM_BO_PRIVATE) {
-		ret = alloc_private_pages(bo, from_highmem, cached);
+		ret = alloc_private_pages(bo, from_highmem);
 	} else if (type == HMM_BO_USER) {
-		ret = alloc_user_pages(bo, userptr, cached);
+		ret = alloc_user_pages(bo, userptr);
 	} else {
 		dev_err(atomisp_dev, "invalid buffer type.\n");
 		ret = -EINVAL;
...