Commit 38eeeb51 authored by Rebecca Schultz Zavin, committed by Greg Kroah-Hartman

gpu: ion: Clarify variable names and comments around heap ids v types

There is some confusion between when to use the heap type and when
the id.  This patch clarifies this by using clearer variable names
and describing the intention in the comments.  Also fixes the client
debug code to print heaps by id instead of type.
Signed-off-by: Rebecca Schultz Zavin <rebecca@android.com>
[jstultz: modified patch to apply to staging directory]
Signed-off-by: John Stultz <john.stultz@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent e3c2eb7c
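
For orientation, here is a minimal kernel-side sketch of the renamed interface: a client is created against a mask of heap *types* it may use, while each allocation passes a mask of heap *ids* to try. The include path and the example heap id are assumptions for illustration only, not part of this patch.

/*
 * Illustrative only: a hypothetical in-kernel caller of the renamed API.
 * The include path and EXAMPLE_CARVEOUT_HEAP_ID are assumptions; heap ids
 * are assigned by the platform's ion_platform_heap data.
 */
#include <linux/err.h>
#include <linux/sizes.h>
#include "ion.h"

#define EXAMPLE_CARVEOUT_HEAP_ID 2      /* assumed platform heap id */

static struct ion_handle *example_alloc(struct ion_device *idev)
{
        struct ion_client *client;
        struct ion_handle *handle;

        /* heap_type_mask: the heap *types* this client may ever allocate from */
        client = ion_client_create(idev,
                                   ION_HEAP_SYSTEM_CONTIG_MASK |
                                   ION_HEAP_CARVEOUT_MASK,
                                   "example-client");
        if (IS_ERR_OR_NULL(client))
                return ERR_PTR(-ENODEV);

        /* heap_id_mask: the specific heap *ids* to try for this allocation */
        handle = ion_alloc(client, SZ_64K, SZ_4K,
                           1 << EXAMPLE_CARVEOUT_HEAP_ID, 0);

        return handle;
}

As the allocation loop in the diff below shows, a heap is used only if its type bit is in the client's heap_type_mask and its id bit is in the caller's heap_id_mask; otherwise ion_alloc() falls through to the next heap in priority order.
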
 /*
  * drivers/staging/android/ion/ion.c
  *
  * Copyright (C) 2011 Google, Inc.
@@ -62,7 +63,7 @@ struct ion_device {
  * @dev: backpointer to ion device
  * @handles: an rb tree of all the handles in this client
  * @lock: lock protecting the tree of handles
- * @heap_mask: mask of all supported heaps
+ * @heap_type_mask: mask of all supported heap types
  * @name: used for debugging
  * @task: used for debugging
  *
@@ -75,7 +76,7 @@ struct ion_client {
         struct ion_device *dev;
         struct rb_root handles;
         struct mutex lock;
-        unsigned int heap_mask;
+        unsigned int heap_type_mask;
         const char *name;
         struct task_struct *task;
         pid_t pid;
@@ -386,7 +387,7 @@ static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
 }
 
 struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
-                             size_t align, unsigned int heap_mask,
+                             size_t align, unsigned int heap_id_mask,
                              unsigned int flags)
 {
         struct ion_handle *handle;
@@ -394,8 +395,8 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
         struct ion_buffer *buffer = NULL;
         struct ion_heap *heap;
 
-        pr_debug("%s: len %d align %d heap_mask %u flags %x\n", __func__, len,
-                 align, heap_mask, flags);
+        pr_debug("%s: len %d align %d heap_id_mask %u flags %x\n", __func__,
+                 len, align, heap_id_mask, flags);
         /*
          * traverse the list of heaps available in this system in priority
          * order. If the heap type is supported by the client, and matches the
@@ -410,10 +411,10 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
         down_read(&dev->lock);
         plist_for_each_entry(heap, &dev->heaps, node) {
                 /* if the client doesn't support this heap type */
-                if (!((1 << heap->type) & client->heap_mask))
+                if (!((1 << heap->type) & client->heap_type_mask))
                         continue;
-                /* if the caller didn't specify this heap type */
-                if (!((1 << heap->id) & heap_mask))
+                /* if the caller didn't specify this heap id */
+                if (!((1 << heap->id) & heap_id_mask))
                         continue;
                 buffer = ion_buffer_create(heap, dev, len, align, flags);
                 if (!IS_ERR_OR_NULL(buffer))
@@ -588,24 +589,24 @@ static int ion_debug_client_show(struct seq_file *s, void *unused)
 {
         struct ion_client *client = s->private;
         struct rb_node *n;
-        size_t sizes[ION_NUM_HEAPS] = {0};
-        const char *names[ION_NUM_HEAPS] = {0};
+        size_t sizes[ION_NUM_HEAP_IDS] = {0};
+        const char *names[ION_NUM_HEAP_IDS] = {0};
         int i;
 
         mutex_lock(&client->lock);
         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
                                                      node);
-                enum ion_heap_type type = handle->buffer->heap->type;
+                unsigned int id = handle->buffer->heap->id;
 
-                if (!names[type])
-                        names[type] = handle->buffer->heap->name;
-                sizes[type] += handle->buffer->size;
+                if (!names[id])
+                        names[id] = handle->buffer->heap->name;
+                sizes[id] += handle->buffer->size;
         }
         mutex_unlock(&client->lock);
 
         seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
-        for (i = 0; i < ION_NUM_HEAPS; i++) {
+        for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
                 if (!names[i])
                         continue;
                 seq_printf(s, "%16.16s: %16u\n", names[i], sizes[i]);
@@ -626,7 +627,7 @@ static const struct file_operations debug_client_fops = {
 };
 
 struct ion_client *ion_client_create(struct ion_device *dev,
-                                     unsigned int heap_mask,
+                                     unsigned int heap_type_mask,
                                      const char *name)
 {
         struct ion_client *client;
@@ -661,7 +662,7 @@ struct ion_client *ion_client_create(struct ion_device *dev,
         client->handles = RB_ROOT;
         mutex_init(&client->lock);
         client->name = name;
-        client->heap_mask = heap_mask;
+        client->heap_type_mask = heap_type_mask;
         client->task = task;
         client->pid = pid;
 
@@ -1057,7 +1058,7 @@ static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
                 if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
                         return -EFAULT;
                 data.handle = ion_alloc(client, data.len, data.align,
-                                        data.heap_mask, data.flags);
+                                        data.heap_id_mask, data.flags);
 
                 if (IS_ERR(data.handle))
                         return PTR_ERR(data.handle);
...
@@ -43,6 +43,8 @@ enum ion_heap_type {
 #define ION_HEAP_SYSTEM_CONTIG_MASK (1 << ION_HEAP_TYPE_SYSTEM_CONTIG)
 #define ION_HEAP_CARVEOUT_MASK (1 << ION_HEAP_TYPE_CARVEOUT)
 
+#define ION_NUM_HEAP_IDS sizeof(unsigned int) * 8
+
 /**
  * heap flags - the lower 16 bits are used by core ion, the upper 16
  * bits are reserved for use by the heaps themselves.
@@ -71,8 +73,9 @@ struct ion_buffer;
 /**
  * struct ion_platform_heap - defines a heap in the given platform
  * @type: type of the heap from ion_heap_type enum
- * @id: unique identifier for heap. When allocating (lower numbers
- *      will be allocated from first)
+ * @id: unique identifier for heap. When allocating higher numbers
+ *      will be allocated from first. At allocation these are passed
+ *      as a bit mask and therefore can not exceed ION_NUM_HEAP_IDS.
 * @name: used for debug purposes
 * @base: base address of heap in physical memory if applicable
 * @size: size of the heap in bytes if applicable
@@ -117,11 +120,12 @@ void ion_reserve(struct ion_platform_data *data);
 /**
  * ion_client_create() - allocate a client and returns it
  * @dev: the global ion device
- * @heap_mask: mask of heaps this client can allocate from
+ * @heap_type_mask: mask of heaps this client can allocate from
  * @name: used for debugging
  */
 struct ion_client *ion_client_create(struct ion_device *dev,
-                                     unsigned int heap_mask, const char *name);
+                                     unsigned int heap_type_mask,
+                                     const char *name);
 
 /**
  * ion_client_destroy() - free's a client and all it's handles
@@ -136,19 +140,20 @@ void ion_client_destroy(struct ion_client *client);
  * ion_alloc - allocate ion memory
  * @client: the client
  * @len: size of the allocation
- * @align: requested allocation alignment, lots of hardware blocks have
- *         alignment requirements of some kind
- * @heap_mask: mask of heaps to allocate from, if multiple bits are set
- *             heaps will be tried in order from lowest to highest order bit
- * @flags: heap flags, the low 16 bits are consumed by ion, the high 16
- *         bits are passed on to the respective heap and can be heap
- *         custom
+ * @align: requested allocation alignment, lots of hardware blocks
+ *         have alignment requirements of some kind
+ * @heap_id_mask: mask of heaps to allocate from, if multiple bits are set
+ *                heaps will be tried in order from highest to lowest
+ *                id
+ * @flags: heap flags, the low 16 bits are consumed by ion, the
+ *         high 16 bits are passed on to the respective heap and
+ *         can be heap custom
 *
 * Allocate memory in one of the heaps provided in heap mask and return
 * an opaque handle to it.
 */
 struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
-                             size_t align, unsigned int heap_mask,
+                             size_t align, unsigned int heap_id_mask,
                              unsigned int flags);
 
 /**
@@ -239,17 +244,17 @@ struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd);
 * struct ion_allocation_data - metadata passed from userspace for allocations
 * @len: size of the allocation
 * @align: required alignment of the allocation
- * @heap_mask: mask of heaps to allocate from
+ * @heap_id_mask: mask of heap ids to allocate from
 * @flags: flags passed to heap
- * @handle: pointer that will be populated with a cookie to use to refer
- *          to this allocation
+ * @handle: pointer that will be populated with a cookie to use to
+ *          refer to this allocation
 *
 * Provided by userspace as an argument to the ioctl
 */
 struct ion_allocation_data {
         size_t len;
         size_t align;
-        unsigned int heap_mask;
+        unsigned int heap_id_mask;
         unsigned int flags;
         struct ion_handle *handle;
 };
...
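
From userspace the rename is visible through struct ion_allocation_data above. A minimal sketch of an ION_IOC_ALLOC call using the new field name follows; the header path, device node permissions, and heap id 0 are assumptions for illustration, not part of this patch.

/*
 * Illustrative only: userspace allocation via ION_IOC_ALLOC using the
 * renamed heap_id_mask field.  Header path and heap id are assumptions.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "ion.h"

int example_ion_alloc(void)
{
        struct ion_allocation_data data;
        int fd, ret;

        fd = open("/dev/ion", O_RDWR);
        if (fd < 0)
                return -1;

        memset(&data, 0, sizeof(data));
        data.len = 4096;
        data.align = 4096;
        data.heap_id_mask = 1 << 0;     /* assumed: heap id 0 */
        data.flags = 0;

        /* on success the kernel fills data.handle with an opaque cookie */
        ret = ioctl(fd, ION_IOC_ALLOC, &data);

        /* closing the fd tears down the client and frees its handles */
        close(fd);
        return ret;
}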