Commit 25d0479a authored by Joe Perches, committed by Dave Airlie

drm/ttm: Use pr_fmt and pr_<level>

Use the more current logging style.

Add pr_fmt and remove the TTM_PFX uses.
Coalesce formats and align arguments.
Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
parent f1048765
...@@ -29,6 +29,8 @@ ...@@ -29,6 +29,8 @@
* Keith Packard. * Keith Packard.
*/ */
#define pr_fmt(fmt) "[TTM] " fmt
#include "ttm/ttm_module.h" #include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h" #include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_page_alloc.h" #include "ttm/ttm_page_alloc.h"
...@@ -74,7 +76,7 @@ static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) ...@@ -74,7 +76,7 @@ static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
ret = agp_bind_memory(mem, node->start); ret = agp_bind_memory(mem, node->start);
if (ret) if (ret)
printk(KERN_ERR TTM_PFX "AGP Bind memory failed.\n"); pr_err("AGP Bind memory failed\n");
return ret; return ret;
} }
......
...@@ -28,6 +28,8 @@ ...@@ -28,6 +28,8 @@
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/ */
#define pr_fmt(fmt) "[TTM] " fmt
#include "ttm/ttm_module.h" #include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h" #include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h" #include "ttm/ttm_placement.h"
...@@ -68,15 +70,13 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type) ...@@ -68,15 +70,13 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
{ {
struct ttm_mem_type_manager *man = &bdev->man[mem_type]; struct ttm_mem_type_manager *man = &bdev->man[mem_type];
printk(KERN_ERR TTM_PFX " has_type: %d\n", man->has_type); pr_err(" has_type: %d\n", man->has_type);
printk(KERN_ERR TTM_PFX " use_type: %d\n", man->use_type); pr_err(" use_type: %d\n", man->use_type);
printk(KERN_ERR TTM_PFX " flags: 0x%08X\n", man->flags); pr_err(" flags: 0x%08X\n", man->flags);
printk(KERN_ERR TTM_PFX " gpu_offset: 0x%08lX\n", man->gpu_offset); pr_err(" gpu_offset: 0x%08lX\n", man->gpu_offset);
printk(KERN_ERR TTM_PFX " size: %llu\n", man->size); pr_err(" size: %llu\n", man->size);
printk(KERN_ERR TTM_PFX " available_caching: 0x%08X\n", pr_err(" available_caching: 0x%08X\n", man->available_caching);
man->available_caching); pr_err(" default_caching: 0x%08X\n", man->default_caching);
printk(KERN_ERR TTM_PFX " default_caching: 0x%08X\n",
man->default_caching);
if (mem_type != TTM_PL_SYSTEM) if (mem_type != TTM_PL_SYSTEM)
(*man->func->debug)(man, TTM_PFX); (*man->func->debug)(man, TTM_PFX);
} }
...@@ -86,16 +86,16 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo, ...@@ -86,16 +86,16 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
{ {
int i, ret, mem_type; int i, ret, mem_type;
printk(KERN_ERR TTM_PFX "No space for %p (%lu pages, %luK, %luM)\n", pr_err("No space for %p (%lu pages, %luK, %luM)\n",
bo, bo->mem.num_pages, bo->mem.size >> 10, bo, bo->mem.num_pages, bo->mem.size >> 10,
bo->mem.size >> 20); bo->mem.size >> 20);
for (i = 0; i < placement->num_placement; i++) { for (i = 0; i < placement->num_placement; i++) {
ret = ttm_mem_type_from_flags(placement->placement[i], ret = ttm_mem_type_from_flags(placement->placement[i],
&mem_type); &mem_type);
if (ret) if (ret)
return; return;
printk(KERN_ERR TTM_PFX " placement[%d]=0x%08X (%d)\n", pr_err(" placement[%d]=0x%08X (%d)\n",
i, placement->placement[i], mem_type); i, placement->placement[i], mem_type);
ttm_mem_type_debug(bo->bdev, mem_type); ttm_mem_type_debug(bo->bdev, mem_type);
} }
} }
...@@ -344,7 +344,7 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc) ...@@ -344,7 +344,7 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
ret = -ENOMEM; ret = -ENOMEM;
break; break;
default: default:
printk(KERN_ERR TTM_PFX "Illegal buffer object type\n"); pr_err("Illegal buffer object type\n");
ret = -EINVAL; ret = -EINVAL;
break; break;
} }
...@@ -432,7 +432,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, ...@@ -432,7 +432,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
if (bo->evicted) { if (bo->evicted) {
ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement); ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
if (ret) if (ret)
printk(KERN_ERR TTM_PFX "Can not flush read caches\n"); pr_err("Can not flush read caches\n");
bo->evicted = false; bo->evicted = false;
} }
...@@ -734,9 +734,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, ...@@ -734,9 +734,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS) { if (ret != -ERESTARTSYS) {
printk(KERN_ERR TTM_PFX pr_err("Failed to expire sync object before buffer eviction\n");
"Failed to expire sync object before "
"buffer eviction.\n");
} }
goto out; goto out;
} }
...@@ -757,9 +755,8 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, ...@@ -757,9 +755,8 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
no_wait_reserve, no_wait_gpu); no_wait_reserve, no_wait_gpu);
if (ret) { if (ret) {
if (ret != -ERESTARTSYS) { if (ret != -ERESTARTSYS) {
printk(KERN_ERR TTM_PFX pr_err("Failed to find memory space for buffer 0x%p eviction\n",
"Failed to find memory space for " bo);
"buffer 0x%p eviction.\n", bo);
ttm_bo_mem_space_debug(bo, &placement); ttm_bo_mem_space_debug(bo, &placement);
} }
goto out; goto out;
...@@ -769,7 +766,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, ...@@ -769,7 +766,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
no_wait_reserve, no_wait_gpu); no_wait_reserve, no_wait_gpu);
if (ret) { if (ret) {
if (ret != -ERESTARTSYS) if (ret != -ERESTARTSYS)
printk(KERN_ERR TTM_PFX "Buffer eviction failed\n"); pr_err("Buffer eviction failed\n");
ttm_bo_mem_put(bo, &evict_mem); ttm_bo_mem_put(bo, &evict_mem);
goto out; goto out;
} }
...@@ -1180,7 +1177,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev, ...@@ -1180,7 +1177,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false); ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
if (ret) { if (ret) {
printk(KERN_ERR TTM_PFX "Out of kernel memory.\n"); pr_err("Out of kernel memory\n");
if (destroy) if (destroy)
(*destroy)(bo); (*destroy)(bo);
else else
...@@ -1191,7 +1188,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev, ...@@ -1191,7 +1188,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
size += buffer_start & ~PAGE_MASK; size += buffer_start & ~PAGE_MASK;
num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
if (num_pages == 0) { if (num_pages == 0) {
printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n"); pr_err("Illegal buffer object size\n");
if (destroy) if (destroy)
(*destroy)(bo); (*destroy)(bo);
else else
...@@ -1342,8 +1339,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, ...@@ -1342,8 +1339,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
if (allow_errors) { if (allow_errors) {
return ret; return ret;
} else { } else {
printk(KERN_ERR TTM_PFX pr_err("Cleanup eviction failed\n");
"Cleanup eviction failed\n");
} }
} }
spin_lock(&glob->lru_lock); spin_lock(&glob->lru_lock);
...@@ -1358,14 +1354,14 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) ...@@ -1358,14 +1354,14 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
int ret = -EINVAL; int ret = -EINVAL;
if (mem_type >= TTM_NUM_MEM_TYPES) { if (mem_type >= TTM_NUM_MEM_TYPES) {
printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type); pr_err("Illegal memory type %d\n", mem_type);
return ret; return ret;
} }
man = &bdev->man[mem_type]; man = &bdev->man[mem_type];
if (!man->has_type) { if (!man->has_type) {
printk(KERN_ERR TTM_PFX "Trying to take down uninitialized " pr_err("Trying to take down uninitialized memory manager type %u\n",
"memory manager type %u\n", mem_type); mem_type);
return ret; return ret;
} }
...@@ -1388,16 +1384,12 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type) ...@@ -1388,16 +1384,12 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
struct ttm_mem_type_manager *man = &bdev->man[mem_type]; struct ttm_mem_type_manager *man = &bdev->man[mem_type];
if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) { if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
printk(KERN_ERR TTM_PFX pr_err("Illegal memory manager memory type %u\n", mem_type);
"Illegal memory manager memory type %u.\n",
mem_type);
return -EINVAL; return -EINVAL;
} }
if (!man->has_type) { if (!man->has_type) {
printk(KERN_ERR TTM_PFX pr_err("Memory type %u has not been initialized\n", mem_type);
"Memory type %u has not been initialized.\n",
mem_type);
return 0; return 0;
} }
...@@ -1482,8 +1474,7 @@ int ttm_bo_global_init(struct drm_global_reference *ref) ...@@ -1482,8 +1474,7 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout); ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink); ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
printk(KERN_ERR TTM_PFX pr_err("Could not register buffer object swapout\n");
"Could not register buffer object swapout.\n");
goto out_no_shrink; goto out_no_shrink;
} }
...@@ -1516,9 +1507,8 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev) ...@@ -1516,9 +1507,8 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
man->use_type = false; man->use_type = false;
if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) { if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
ret = -EBUSY; ret = -EBUSY;
printk(KERN_ERR TTM_PFX pr_err("DRM memory manager type %d is not clean\n",
"DRM memory manager type %d " i);
"is not clean.\n", i);
} }
man->has_type = false; man->has_type = false;
} }
......
...@@ -28,6 +28,8 @@ ...@@ -28,6 +28,8 @@
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/ */
#define pr_fmt(fmt) "[TTM] " fmt
#include <ttm/ttm_module.h> #include <ttm/ttm_module.h>
#include <ttm/ttm_bo_driver.h> #include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h> #include <ttm/ttm_placement.h>
...@@ -262,8 +264,7 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, ...@@ -262,8 +264,7 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
read_unlock(&bdev->vm_lock); read_unlock(&bdev->vm_lock);
if (unlikely(bo == NULL)) { if (unlikely(bo == NULL)) {
printk(KERN_ERR TTM_PFX pr_err("Could not find buffer object to map\n");
"Could not find buffer object to map.\n");
return -EINVAL; return -EINVAL;
} }
......
...@@ -25,6 +25,8 @@ ...@@ -25,6 +25,8 @@
* *
**************************************************************************/ **************************************************************************/
#define pr_fmt(fmt) "[TTM] " fmt
#include "ttm/ttm_memory.h" #include "ttm/ttm_memory.h"
#include "ttm/ttm_module.h" #include "ttm/ttm_module.h"
#include "ttm/ttm_page_alloc.h" #include "ttm/ttm_page_alloc.h"
...@@ -74,9 +76,8 @@ static void ttm_mem_zone_kobj_release(struct kobject *kobj) ...@@ -74,9 +76,8 @@ static void ttm_mem_zone_kobj_release(struct kobject *kobj)
struct ttm_mem_zone *zone = struct ttm_mem_zone *zone =
container_of(kobj, struct ttm_mem_zone, kobj); container_of(kobj, struct ttm_mem_zone, kobj);
printk(KERN_INFO TTM_PFX pr_info("Zone %7s: Used memory at exit: %llu kiB\n",
"Zone %7s: Used memory at exit: %llu kiB.\n", zone->name, (unsigned long long)zone->used_mem >> 10);
zone->name, (unsigned long long) zone->used_mem >> 10);
kfree(zone); kfree(zone);
} }
...@@ -390,9 +391,8 @@ int ttm_mem_global_init(struct ttm_mem_global *glob) ...@@ -390,9 +391,8 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
#endif #endif
for (i = 0; i < glob->num_zones; ++i) { for (i = 0; i < glob->num_zones; ++i) {
zone = glob->zones[i]; zone = glob->zones[i];
printk(KERN_INFO TTM_PFX pr_info("Zone %7s: Available graphics memory: %llu kiB\n",
"Zone %7s: Available graphics memory: %llu kiB.\n", zone->name, (unsigned long long)zone->max_mem >> 10);
zone->name, (unsigned long long) zone->max_mem >> 10);
} }
ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE)); ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE)); ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
......
...@@ -49,6 +49,8 @@ ...@@ -49,6 +49,8 @@
* for fast lookup of ref objects given a base object. * for fast lookup of ref objects given a base object.
*/ */
#define pr_fmt(fmt) "[TTM] " fmt
#include "ttm/ttm_object.h" #include "ttm/ttm_object.h"
#include "ttm/ttm_module.h" #include "ttm/ttm_module.h"
#include <linux/list.h> #include <linux/list.h>
...@@ -232,8 +234,7 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile, ...@@ -232,8 +234,7 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
return NULL; return NULL;
if (tfile != base->tfile && !base->shareable) { if (tfile != base->tfile && !base->shareable) {
printk(KERN_ERR TTM_PFX pr_err("Attempted access of non-shareable object\n");
"Attempted access of non-shareable object.\n");
ttm_base_object_unref(&base); ttm_base_object_unref(&base);
return NULL; return NULL;
} }
......
...@@ -30,6 +30,9 @@ ...@@ -30,6 +30,9 @@
* - Use page->lru to keep a free list * - Use page->lru to keep a free list
* - doesn't track currently in use pages * - doesn't track currently in use pages
*/ */
#define pr_fmt(fmt) "[TTM] " fmt
#include <linux/list.h> #include <linux/list.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/highmem.h> #include <linux/highmem.h>
...@@ -167,18 +170,13 @@ static ssize_t ttm_pool_store(struct kobject *kobj, ...@@ -167,18 +170,13 @@ static ssize_t ttm_pool_store(struct kobject *kobj,
m->options.small = val; m->options.small = val;
else if (attr == &ttm_page_pool_alloc_size) { else if (attr == &ttm_page_pool_alloc_size) {
if (val > NUM_PAGES_TO_ALLOC*8) { if (val > NUM_PAGES_TO_ALLOC*8) {
printk(KERN_ERR TTM_PFX pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
"Setting allocation size to %lu "
"is not allowed. Recommended size is "
"%lu\n",
NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7), NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10)); NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
return size; return size;
} else if (val > NUM_PAGES_TO_ALLOC) { } else if (val > NUM_PAGES_TO_ALLOC) {
printk(KERN_WARNING TTM_PFX pr_warn("Setting allocation size to larger than %lu is not recommended\n",
"Setting allocation size to " NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
"larger than %lu is not recommended.\n",
NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
} }
m->options.alloc_size = val; m->options.alloc_size = val;
} }
...@@ -279,8 +277,7 @@ static void ttm_pages_put(struct page *pages[], unsigned npages) ...@@ -279,8 +277,7 @@ static void ttm_pages_put(struct page *pages[], unsigned npages)
{ {
unsigned i; unsigned i;
if (set_pages_array_wb(pages, npages)) if (set_pages_array_wb(pages, npages))
printk(KERN_ERR TTM_PFX "Failed to set %d pages to wb!\n", pr_err("Failed to set %d pages to wb!\n", npages);
npages);
for (i = 0; i < npages; ++i) for (i = 0; i < npages; ++i)
__free_page(pages[i]); __free_page(pages[i]);
} }
...@@ -315,8 +312,7 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free) ...@@ -315,8 +312,7 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
GFP_KERNEL); GFP_KERNEL);
if (!pages_to_free) { if (!pages_to_free) {
printk(KERN_ERR TTM_PFX pr_err("Failed to allocate memory for pool free operation\n");
"Failed to allocate memory for pool free operation.\n");
return 0; return 0;
} }
...@@ -438,16 +434,12 @@ static int ttm_set_pages_caching(struct page **pages, ...@@ -438,16 +434,12 @@ static int ttm_set_pages_caching(struct page **pages,
case tt_uncached: case tt_uncached:
r = set_pages_array_uc(pages, cpages); r = set_pages_array_uc(pages, cpages);
if (r) if (r)
printk(KERN_ERR TTM_PFX pr_err("Failed to set %d pages to uc!\n", cpages);
"Failed to set %d pages to uc!\n",
cpages);
break; break;
case tt_wc: case tt_wc:
r = set_pages_array_wc(pages, cpages); r = set_pages_array_wc(pages, cpages);
if (r) if (r)
printk(KERN_ERR TTM_PFX pr_err("Failed to set %d pages to wc!\n", cpages);
"Failed to set %d pages to wc!\n",
cpages);
break; break;
default: default:
break; break;
...@@ -492,8 +484,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags, ...@@ -492,8 +484,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL); caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
if (!caching_array) { if (!caching_array) {
printk(KERN_ERR TTM_PFX pr_err("Unable to allocate table for new pages\n");
"Unable to allocate table for new pages.");
return -ENOMEM; return -ENOMEM;
} }
...@@ -501,7 +492,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags, ...@@ -501,7 +492,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
p = alloc_page(gfp_flags); p = alloc_page(gfp_flags);
if (!p) { if (!p) {
printk(KERN_ERR TTM_PFX "Unable to get page %u.\n", i); pr_err("Unable to get page %u\n", i);
/* store already allocated pages in the pool after /* store already allocated pages in the pool after
* setting the caching state */ * setting the caching state */
...@@ -599,8 +590,7 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, ...@@ -599,8 +590,7 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
++pool->nrefills; ++pool->nrefills;
pool->npages += alloc_size; pool->npages += alloc_size;
} else { } else {
printk(KERN_ERR TTM_PFX pr_err("Failed to fill pool (%p)\n", pool);
"Failed to fill pool (%p).", pool);
/* If we have any pages left put them to the pool. */ /* If we have any pages left put them to the pool. */
list_for_each_entry(p, &pool->list, lru) { list_for_each_entry(p, &pool->list, lru) {
++cpages; ++cpages;
...@@ -675,9 +665,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags, ...@@ -675,9 +665,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
for (i = 0; i < npages; i++) { for (i = 0; i < npages; i++) {
if (pages[i]) { if (pages[i]) {
if (page_count(pages[i]) != 1) if (page_count(pages[i]) != 1)
printk(KERN_ERR TTM_PFX pr_err("Erroneous page count. Leaking pages.\n");
"Erroneous page count. "
"Leaking pages.\n");
__free_page(pages[i]); __free_page(pages[i]);
pages[i] = NULL; pages[i] = NULL;
} }
...@@ -689,9 +677,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags, ...@@ -689,9 +677,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
for (i = 0; i < npages; i++) { for (i = 0; i < npages; i++) {
if (pages[i]) { if (pages[i]) {
if (page_count(pages[i]) != 1) if (page_count(pages[i]) != 1)
printk(KERN_ERR TTM_PFX pr_err("Erroneous page count. Leaking pages.\n");
"Erroneous page count. "
"Leaking pages.\n");
list_add_tail(&pages[i]->lru, &pool->list); list_add_tail(&pages[i]->lru, &pool->list);
pages[i] = NULL; pages[i] = NULL;
pool->npages++; pool->npages++;
...@@ -740,8 +726,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags, ...@@ -740,8 +726,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
p = alloc_page(gfp_flags); p = alloc_page(gfp_flags);
if (!p) { if (!p) {
printk(KERN_ERR TTM_PFX pr_err("Unable to allocate page\n");
"Unable to allocate page.");
return -ENOMEM; return -ENOMEM;
} }
...@@ -781,9 +766,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags, ...@@ -781,9 +766,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
if (r) { if (r) {
/* If there is any pages in the list put them back to /* If there is any pages in the list put them back to
* the pool. */ * the pool. */
printk(KERN_ERR TTM_PFX pr_err("Failed to allocate extra pages for large request\n");
"Failed to allocate extra pages "
"for large request.");
ttm_put_pages(pages, count, flags, cstate); ttm_put_pages(pages, count, flags, cstate);
return r; return r;
} }
...@@ -809,7 +792,7 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages) ...@@ -809,7 +792,7 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
WARN_ON(_manager); WARN_ON(_manager);
printk(KERN_INFO TTM_PFX "Initializing pool allocator.\n"); pr_info("Initializing pool allocator\n");
_manager = kzalloc(sizeof(*_manager), GFP_KERNEL); _manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
...@@ -844,7 +827,7 @@ void ttm_page_alloc_fini(void) ...@@ -844,7 +827,7 @@ void ttm_page_alloc_fini(void)
{ {
int i; int i;
printk(KERN_INFO TTM_PFX "Finalizing pool allocator.\n"); pr_info("Finalizing pool allocator\n");
ttm_pool_mm_shrink_fini(_manager); ttm_pool_mm_shrink_fini(_manager);
for (i = 0; i < NUM_POOLS; ++i) for (i = 0; i < NUM_POOLS; ++i)
......
...@@ -33,6 +33,8 @@ ...@@ -33,6 +33,8 @@
* when freed). * when freed).
*/ */
#define pr_fmt(fmt) "[TTM] " fmt
#include <linux/dma-mapping.h> #include <linux/dma-mapping.h>
#include <linux/list.h> #include <linux/list.h>
#include <linux/seq_file.h> /* for seq_printf */ #include <linux/seq_file.h> /* for seq_printf */
...@@ -221,18 +223,13 @@ static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr, ...@@ -221,18 +223,13 @@ static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
m->options.small = val; m->options.small = val;
else if (attr == &ttm_page_pool_alloc_size) { else if (attr == &ttm_page_pool_alloc_size) {
if (val > NUM_PAGES_TO_ALLOC*8) { if (val > NUM_PAGES_TO_ALLOC*8) {
printk(KERN_ERR TTM_PFX pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
"Setting allocation size to %lu "
"is not allowed. Recommended size is "
"%lu\n",
NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7), NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10)); NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
return size; return size;
} else if (val > NUM_PAGES_TO_ALLOC) { } else if (val > NUM_PAGES_TO_ALLOC) {
printk(KERN_WARNING TTM_PFX pr_warn("Setting allocation size to larger than %lu is not recommended\n",
"Setting allocation size to " NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
"larger than %lu is not recommended.\n",
NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
} }
m->options.alloc_size = val; m->options.alloc_size = val;
} }
...@@ -313,15 +310,13 @@ static int ttm_set_pages_caching(struct dma_pool *pool, ...@@ -313,15 +310,13 @@ static int ttm_set_pages_caching(struct dma_pool *pool,
if (pool->type & IS_UC) { if (pool->type & IS_UC) {
r = set_pages_array_uc(pages, cpages); r = set_pages_array_uc(pages, cpages);
if (r) if (r)
pr_err(TTM_PFX pr_err("%s: Failed to set %d pages to uc!\n",
"%s: Failed to set %d pages to uc!\n",
pool->dev_name, cpages); pool->dev_name, cpages);
} }
if (pool->type & IS_WC) { if (pool->type & IS_WC) {
r = set_pages_array_wc(pages, cpages); r = set_pages_array_wc(pages, cpages);
if (r) if (r)
pr_err(TTM_PFX pr_err("%s: Failed to set %d pages to wc!\n",
"%s: Failed to set %d pages to wc!\n",
pool->dev_name, cpages); pool->dev_name, cpages);
} }
return r; return r;
...@@ -387,8 +382,8 @@ static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages, ...@@ -387,8 +382,8 @@ static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
/* Don't set WB on WB page pool. */ /* Don't set WB on WB page pool. */
if (npages && !(pool->type & IS_CACHED) && if (npages && !(pool->type & IS_CACHED) &&
set_pages_array_wb(pages, npages)) set_pages_array_wb(pages, npages))
pr_err(TTM_PFX "%s: Failed to set %d pages to wb!\n", pr_err("%s: Failed to set %d pages to wb!\n",
pool->dev_name, npages); pool->dev_name, npages);
list_for_each_entry_safe(d_page, tmp, d_pages, page_list) { list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
list_del(&d_page->page_list); list_del(&d_page->page_list);
...@@ -400,8 +395,8 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page) ...@@ -400,8 +395,8 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
{ {
/* Don't set WB on WB page pool. */ /* Don't set WB on WB page pool. */
if (!(pool->type & IS_CACHED) && set_pages_array_wb(&d_page->p, 1)) if (!(pool->type & IS_CACHED) && set_pages_array_wb(&d_page->p, 1))
pr_err(TTM_PFX "%s: Failed to set %d pages to wb!\n", pr_err("%s: Failed to set %d pages to wb!\n",
pool->dev_name, 1); pool->dev_name, 1);
list_del(&d_page->page_list); list_del(&d_page->page_list);
__ttm_dma_free_page(pool, d_page); __ttm_dma_free_page(pool, d_page);
...@@ -430,17 +425,16 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free) ...@@ -430,17 +425,16 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
#if 0 #if 0
if (nr_free > 1) { if (nr_free > 1) {
pr_debug("%s: (%s:%d) Attempting to free %d (%d) pages\n", pr_debug("%s: (%s:%d) Attempting to free %d (%d) pages\n",
pool->dev_name, pool->name, current->pid, pool->dev_name, pool->name, current->pid,
npages_to_free, nr_free); npages_to_free, nr_free);
} }
#endif #endif
pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
GFP_KERNEL); GFP_KERNEL);
if (!pages_to_free) { if (!pages_to_free) {
pr_err(TTM_PFX pr_err("%s: Failed to allocate memory for pool free operation\n",
"%s: Failed to allocate memory for pool free operation.\n", pool->dev_name);
pool->dev_name);
return 0; return 0;
} }
INIT_LIST_HEAD(&d_pages); INIT_LIST_HEAD(&d_pages);
...@@ -723,23 +717,21 @@ static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool, ...@@ -723,23 +717,21 @@ static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL); caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
if (!caching_array) { if (!caching_array) {
pr_err(TTM_PFX pr_err("%s: Unable to allocate table for new pages\n",
"%s: Unable to allocate table for new pages.", pool->dev_name);
pool->dev_name);
return -ENOMEM; return -ENOMEM;
} }
if (count > 1) { if (count > 1) {
pr_debug("%s: (%s:%d) Getting %d pages\n", pr_debug("%s: (%s:%d) Getting %d pages\n",
pool->dev_name, pool->name, current->pid, pool->dev_name, pool->name, current->pid, count);
count);
} }
for (i = 0, cpages = 0; i < count; ++i) { for (i = 0, cpages = 0; i < count; ++i) {
dma_p = __ttm_dma_alloc_page(pool); dma_p = __ttm_dma_alloc_page(pool);
if (!dma_p) { if (!dma_p) {
pr_err(TTM_PFX "%s: Unable to get page %u.\n", pr_err("%s: Unable to get page %u\n",
pool->dev_name, i); pool->dev_name, i);
/* store already allocated pages in the pool after /* store already allocated pages in the pool after
* setting the caching state */ * setting the caching state */
...@@ -821,8 +813,8 @@ static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool, ...@@ -821,8 +813,8 @@ static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
struct dma_page *d_page; struct dma_page *d_page;
unsigned cpages = 0; unsigned cpages = 0;
pr_err(TTM_PFX "%s: Failed to fill %s pool (r:%d)!\n", pr_err("%s: Failed to fill %s pool (r:%d)!\n",
pool->dev_name, pool->name, r); pool->dev_name, pool->name, r);
list_for_each_entry(d_page, &d_pages, page_list) { list_for_each_entry(d_page, &d_pages, page_list) {
cpages++; cpages++;
...@@ -1038,8 +1030,8 @@ static int ttm_dma_pool_mm_shrink(struct shrinker *shrink, ...@@ -1038,8 +1030,8 @@ static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
nr_free = shrink_pages; nr_free = shrink_pages;
shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free); shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n", pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
p->pool->dev_name, p->pool->name, current->pid, nr_free, p->pool->dev_name, p->pool->name, current->pid,
shrink_pages); nr_free, shrink_pages);
} }
mutex_unlock(&_manager->lock); mutex_unlock(&_manager->lock);
/* return estimated number of unused pages in pool */ /* return estimated number of unused pages in pool */
...@@ -1064,7 +1056,7 @@ int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages) ...@@ -1064,7 +1056,7 @@ int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
WARN_ON(_manager); WARN_ON(_manager);
printk(KERN_INFO TTM_PFX "Initializing DMA pool allocator.\n"); pr_info("Initializing DMA pool allocator\n");
_manager = kzalloc(sizeof(*_manager), GFP_KERNEL); _manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
if (!_manager) if (!_manager)
...@@ -1097,7 +1089,7 @@ void ttm_dma_page_alloc_fini(void) ...@@ -1097,7 +1089,7 @@ void ttm_dma_page_alloc_fini(void)
{ {
struct device_pools *p, *t; struct device_pools *p, *t;
printk(KERN_INFO TTM_PFX "Finalizing DMA pool allocator.\n"); pr_info("Finalizing DMA pool allocator\n");
ttm_dma_pool_mm_shrink_fini(_manager); ttm_dma_pool_mm_shrink_fini(_manager);
list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) { list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) {
......
...@@ -28,6 +28,8 @@ ...@@ -28,6 +28,8 @@
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/ */
#define pr_fmt(fmt) "[TTM] " fmt
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/highmem.h> #include <linux/highmem.h>
#include <linux/pagemap.h> #include <linux/pagemap.h>
...@@ -196,7 +198,7 @@ int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev, ...@@ -196,7 +198,7 @@ int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
ttm_tt_alloc_page_directory(ttm); ttm_tt_alloc_page_directory(ttm);
if (!ttm->pages) { if (!ttm->pages) {
ttm_tt_destroy(ttm); ttm_tt_destroy(ttm);
printk(KERN_ERR TTM_PFX "Failed allocating page table\n"); pr_err("Failed allocating page table\n");
return -ENOMEM; return -ENOMEM;
} }
return 0; return 0;
...@@ -229,7 +231,7 @@ int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev, ...@@ -229,7 +231,7 @@ int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
ttm_dma_tt_alloc_page_directory(ttm_dma); ttm_dma_tt_alloc_page_directory(ttm_dma);
if (!ttm->pages || !ttm_dma->dma_address) { if (!ttm->pages || !ttm_dma->dma_address) {
ttm_tt_destroy(ttm); ttm_tt_destroy(ttm);
printk(KERN_ERR TTM_PFX "Failed allocating page table\n"); pr_err("Failed allocating page table\n");
return -ENOMEM; return -ENOMEM;
} }
return 0; return 0;
...@@ -347,7 +349,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage) ...@@ -347,7 +349,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
ttm->num_pages << PAGE_SHIFT, ttm->num_pages << PAGE_SHIFT,
0); 0);
if (unlikely(IS_ERR(swap_storage))) { if (unlikely(IS_ERR(swap_storage))) {
printk(KERN_ERR "Failed allocating swap storage.\n"); pr_err("Failed allocating swap storage\n");
return PTR_ERR(swap_storage); return PTR_ERR(swap_storage);
} }
} else } else
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment