Commit 767601d1 authored by Michel Dänzer, committed by Alex Deucher

drm/ttm: Downgrade pr_err to pr_debug for memory allocation failures

Memory allocation failure should generally be handled gracefully by
callers. In particular, with transparent hugepage support, attempts
to allocate huge pages can fail under memory pressure, but the callers
fall back to allocating individual pages instead. In that case, there
would be spurious

 [TTM] Unable to get page %u

error messages in dmesg.

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent e1fc12c5
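
To illustrate the pattern the commit message describes, here is a minimal, hypothetical sketch (the helper name, parameters and flags are illustrative, not code from TTM): a caller opportunistically tries a higher-order huge page and, on failure, falls back to individual pages, so the failed attempt only merits a debug-level message.

/*
 * Illustrative sketch only (assumed names, not code from this patch):
 * try a huge page first, fall back to an order-0 page when that fails.
 */
#include <linux/gfp.h>
#include <linux/printk.h>

static struct page *example_alloc_page(gfp_t gfp_flags, unsigned int huge_order)
{
	struct page *p;

	/* Opportunistic huge-page attempt; don't retry hard. */
	p = alloc_pages(gfp_flags | __GFP_NORETRY, huge_order);
	if (p)
		return p;

	/*
	 * Expected under memory pressure, and handled by the fallback
	 * below, so a debug message is enough; pr_err() would spam dmesg.
	 */
	pr_debug("huge page allocation failed, falling back to single pages\n");

	return alloc_pages(gfp_flags, 0);
}
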
drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -329,7 +329,7 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
 	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
 			GFP_KERNEL);
 	if (!pages_to_free) {
-		pr_err("Failed to allocate memory for pool free operation\n");
+		pr_debug("Failed to allocate memory for pool free operation\n");
 		return 0;
 	}
@@ -517,7 +517,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
 	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
 	if (!caching_array) {
-		pr_err("Unable to allocate table for new pages\n");
+		pr_debug("Unable to allocate table for new pages\n");
 		return -ENOMEM;
 	}
@@ -525,7 +525,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
 		p = alloc_pages(gfp_flags, order);
 		if (!p) {
-			pr_err("Unable to get page %u\n", i);
+			pr_debug("Unable to get page %u\n", i);
 			/* store already allocated pages in the pool after
 			 * setting the caching state */
@@ -625,7 +625,7 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, int ttm_flags,
 			++pool->nrefills;
 			pool->npages += alloc_size;
 		} else {
-			pr_err("Failed to fill pool (%p)\n", pool);
+			pr_debug("Failed to fill pool (%p)\n", pool);
 			/* If we have any pages left put them to the pool. */
 			list_for_each_entry(p, &new_pages, lru) {
 				++cpages;
@@ -885,8 +885,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
 		while (npages) {
 			p = alloc_page(gfp_flags);
 			if (!p) {
-
-				pr_err("Unable to allocate page\n");
+				pr_debug("Unable to allocate page\n");
 				return -ENOMEM;
 			}
@@ -925,7 +924,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
 		/* If there is any pages in the list put them back to
 		 * the pool.
 		 */
-		pr_err("Failed to allocate extra pages for large request\n");
+		pr_debug("Failed to allocate extra pages for large request\n");
 		ttm_put_pages(pages, count, flags, cstate);
 		return r;
 	}
drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -463,7 +463,7 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
 			GFP_KERNEL);
 	if (!pages_to_free) {
-		pr_err("%s: Failed to allocate memory for pool free operation\n",
+		pr_debug("%s: Failed to allocate memory for pool free operation\n",
 			pool->dev_name);
 		return 0;
 	}
@@ -755,7 +755,7 @@ static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
 	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
 	if (!caching_array) {
-		pr_err("%s: Unable to allocate table for new pages\n",
+		pr_debug("%s: Unable to allocate table for new pages\n",
 			pool->dev_name);
 		return -ENOMEM;
 	}
@@ -768,8 +768,8 @@ static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
 	for (i = 0, cpages = 0; i < count; ++i) {
 		dma_p = __ttm_dma_alloc_page(pool);
 		if (!dma_p) {
-			pr_err("%s: Unable to get page %u\n",
-			       pool->dev_name, i);
+			pr_debug("%s: Unable to get page %u\n",
+				 pool->dev_name, i);
 			/* store already allocated pages in the pool after
 			 * setting the caching state */
@@ -855,8 +855,8 @@ static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
 		struct dma_page *d_page;
 		unsigned cpages = 0;
 
-		pr_err("%s: Failed to fill %s pool (r:%d)!\n",
-		       pool->dev_name, pool->name, r);
+		pr_debug("%s: Failed to fill %s pool (r:%d)!\n",
+			 pool->dev_name, pool->name, r);
 		list_for_each_entry(d_page, &d_pages, page_list) {
 			cpages++;
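
A note on the practical effect of the downgrade, with a small sketch below (the pr_fmt() definition is an assumption about how the quoted "[TTM]" prefix is produced, and the function is illustrative, not code from this patch): pr_debug() calls produce no output unless the file is built with DEBUG, or CONFIG_DYNAMIC_DEBUG is enabled and the call site is turned on at runtime, so the messages remain available for debugging without cluttering dmesg by default.

/*
 * Minimal sketch, not code from this patch. The "[TTM] " prefix seen in the
 * quoted message is assumed to come from a pr_fmt() definition like this one.
 */
#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/gfp.h>
#include <linux/printk.h>

static struct page *example_get_page(gfp_t gfp_flags, unsigned int i)
{
	struct page *p = alloc_page(gfp_flags);

	if (!p) {
		/*
		 * Silent by default; enable at runtime with dynamic debug, e.g.
		 * echo 'file ttm_page_alloc.c +p' > /sys/kernel/debug/dynamic_debug/control
		 */
		pr_debug("Unable to get page %u\n", i);
	}
	return p;
}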