Commit 01ce18b3 authored by Florian Fainelli, committed by Linus Torvalds

dma-debug: introduce dma_debug_disabled

Add a helper function which returns whether the DMA debugging API is
disabled, right now we only check for global_disable, but in order to
accommodate early callers of the DMA-API, we will check for more
initialization flags in the next patch.
Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Horia Geanta <horia.geanta@freescale.com>
Cc: Brian Norris <computersforpeace@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 662e9b2b
...@@ -102,6 +102,11 @@ static DEFINE_SPINLOCK(free_entries_lock); ...@@ -102,6 +102,11 @@ static DEFINE_SPINLOCK(free_entries_lock);
/* Global disable flag - will be set in case of an error */ /* Global disable flag - will be set in case of an error */
static u32 global_disable __read_mostly; static u32 global_disable __read_mostly;
/*
 * Tell callers whether the DMA debugging API is currently switched off.
 * Wraps the file-scope global_disable flag so future patches can fold
 * additional initialization checks into a single predicate.
 */
static inline bool dma_debug_disabled(void)
{
	return global_disable != 0;
}
/* Global error count */ /* Global error count */
static u32 error_count; static u32 error_count;
...@@ -945,7 +950,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti ...@@ -945,7 +950,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
struct dma_debug_entry *uninitialized_var(entry); struct dma_debug_entry *uninitialized_var(entry);
int count; int count;
if (global_disable) if (dma_debug_disabled())
return 0; return 0;
switch (action) { switch (action) {
...@@ -973,7 +978,7 @@ void dma_debug_add_bus(struct bus_type *bus) ...@@ -973,7 +978,7 @@ void dma_debug_add_bus(struct bus_type *bus)
{ {
struct notifier_block *nb; struct notifier_block *nb;
if (global_disable) if (dma_debug_disabled())
return; return;
nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL); nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
...@@ -994,7 +999,7 @@ void dma_debug_init(u32 num_entries) ...@@ -994,7 +999,7 @@ void dma_debug_init(u32 num_entries)
{ {
int i; int i;
if (global_disable) if (dma_debug_disabled())
return; return;
for (i = 0; i < HASH_SIZE; ++i) { for (i = 0; i < HASH_SIZE; ++i) {
...@@ -1243,7 +1248,7 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset, ...@@ -1243,7 +1248,7 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
{ {
struct dma_debug_entry *entry; struct dma_debug_entry *entry;
if (unlikely(global_disable)) if (unlikely(dma_debug_disabled()))
return; return;
if (dma_mapping_error(dev, dma_addr)) if (dma_mapping_error(dev, dma_addr))
...@@ -1283,7 +1288,7 @@ void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) ...@@ -1283,7 +1288,7 @@ void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
struct hash_bucket *bucket; struct hash_bucket *bucket;
unsigned long flags; unsigned long flags;
if (unlikely(global_disable)) if (unlikely(dma_debug_disabled()))
return; return;
ref.dev = dev; ref.dev = dev;
...@@ -1325,7 +1330,7 @@ void debug_dma_unmap_page(struct device *dev, dma_addr_t addr, ...@@ -1325,7 +1330,7 @@ void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
.direction = direction, .direction = direction,
}; };
if (unlikely(global_disable)) if (unlikely(dma_debug_disabled()))
return; return;
if (map_single) if (map_single)
...@@ -1342,7 +1347,7 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg, ...@@ -1342,7 +1347,7 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
struct scatterlist *s; struct scatterlist *s;
int i; int i;
if (unlikely(global_disable)) if (unlikely(dma_debug_disabled()))
return; return;
for_each_sg(sg, s, mapped_ents, i) { for_each_sg(sg, s, mapped_ents, i) {
...@@ -1395,7 +1400,7 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, ...@@ -1395,7 +1400,7 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
struct scatterlist *s; struct scatterlist *s;
int mapped_ents = 0, i; int mapped_ents = 0, i;
if (unlikely(global_disable)) if (unlikely(dma_debug_disabled()))
return; return;
for_each_sg(sglist, s, nelems, i) { for_each_sg(sglist, s, nelems, i) {
...@@ -1427,7 +1432,7 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size, ...@@ -1427,7 +1432,7 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size,
{ {
struct dma_debug_entry *entry; struct dma_debug_entry *entry;
if (unlikely(global_disable)) if (unlikely(dma_debug_disabled()))
return; return;
if (unlikely(virt == NULL)) if (unlikely(virt == NULL))
...@@ -1462,7 +1467,7 @@ void debug_dma_free_coherent(struct device *dev, size_t size, ...@@ -1462,7 +1467,7 @@ void debug_dma_free_coherent(struct device *dev, size_t size,
.direction = DMA_BIDIRECTIONAL, .direction = DMA_BIDIRECTIONAL,
}; };
if (unlikely(global_disable)) if (unlikely(dma_debug_disabled()))
return; return;
check_unmap(&ref); check_unmap(&ref);
...@@ -1474,7 +1479,7 @@ void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, ...@@ -1474,7 +1479,7 @@ void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
{ {
struct dma_debug_entry ref; struct dma_debug_entry ref;
if (unlikely(global_disable)) if (unlikely(dma_debug_disabled()))
return; return;
ref.type = dma_debug_single; ref.type = dma_debug_single;
...@@ -1494,7 +1499,7 @@ void debug_dma_sync_single_for_device(struct device *dev, ...@@ -1494,7 +1499,7 @@ void debug_dma_sync_single_for_device(struct device *dev,
{ {
struct dma_debug_entry ref; struct dma_debug_entry ref;
if (unlikely(global_disable)) if (unlikely(dma_debug_disabled()))
return; return;
ref.type = dma_debug_single; ref.type = dma_debug_single;
...@@ -1515,7 +1520,7 @@ void debug_dma_sync_single_range_for_cpu(struct device *dev, ...@@ -1515,7 +1520,7 @@ void debug_dma_sync_single_range_for_cpu(struct device *dev,
{ {
struct dma_debug_entry ref; struct dma_debug_entry ref;
if (unlikely(global_disable)) if (unlikely(dma_debug_disabled()))
return; return;
ref.type = dma_debug_single; ref.type = dma_debug_single;
...@@ -1536,7 +1541,7 @@ void debug_dma_sync_single_range_for_device(struct device *dev, ...@@ -1536,7 +1541,7 @@ void debug_dma_sync_single_range_for_device(struct device *dev,
{ {
struct dma_debug_entry ref; struct dma_debug_entry ref;
if (unlikely(global_disable)) if (unlikely(dma_debug_disabled()))
return; return;
ref.type = dma_debug_single; ref.type = dma_debug_single;
...@@ -1556,7 +1561,7 @@ void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, ...@@ -1556,7 +1561,7 @@ void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
struct scatterlist *s; struct scatterlist *s;
int mapped_ents = 0, i; int mapped_ents = 0, i;
if (unlikely(global_disable)) if (unlikely(dma_debug_disabled()))
return; return;
for_each_sg(sg, s, nelems, i) { for_each_sg(sg, s, nelems, i) {
...@@ -1589,7 +1594,7 @@ void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, ...@@ -1589,7 +1594,7 @@ void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
struct scatterlist *s; struct scatterlist *s;
int mapped_ents = 0, i; int mapped_ents = 0, i;
if (unlikely(global_disable)) if (unlikely(dma_debug_disabled()))
return; return;
for_each_sg(sg, s, nelems, i) { for_each_sg(sg, s, nelems, i) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment