Commit c2b41276 authored by Dave Airlie

Merge branch 'drm-ttm-pool' into drm-core-next

* drm-ttm-pool:
  drm/ttm: using kmalloc/kfree requires including slab.h
  drm/ttm: include linux/seq_file.h for seq_printf
  drm/ttm: Add sysfs interface to control pool allocator.
  drm/ttm: Use set_pages_array_wc instead of set_memory_wc.
  arch/x86: Add array variants for setting memory to wc caching.
  drm/nouveau: Add ttm page pool debugfs file.
  drm/radeon/kms: Add ttm page pool debugfs file.
  drm/ttm: Add debugfs output entry to pool allocator.
  drm/ttm: add pool wc/uc page allocator V3
parents 97921a5b 2125b8a4
arch/x86/include/asm/cacheflush.h

@@ -139,9 +139,11 @@ int set_memory_np(unsigned long addr, int numpages);
 int set_memory_4k(unsigned long addr, int numpages);
 
 int set_memory_array_uc(unsigned long *addr, int addrinarray);
+int set_memory_array_wc(unsigned long *addr, int addrinarray);
 int set_memory_array_wb(unsigned long *addr, int addrinarray);
 
 int set_pages_array_uc(struct page **pages, int addrinarray);
+int set_pages_array_wc(struct page **pages, int addrinarray);
 int set_pages_array_wb(struct page **pages, int addrinarray);
 
 /*
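The two declarations added above give callers batched write-combining variants of the existing array interface. A minimal sketch of how a caller might use them; the helper name and error handling are illustrative, not part of this series:

#include <asm/cacheflush.h>

/*
 * Illustrative only: switch a batch of kernel-virtual mappings to
 * write-combining with the new set_memory_array_wc(), then restore
 * write-back before the memory is reused for anything cache-sensitive.
 */
static int example_map_wc(unsigned long *addrs, int count)
{
	int ret;

	ret = set_memory_array_wc(addrs, count);	/* added above */
	if (ret)
		return ret;

	/* ... fill the buffers through their WC mappings ... */

	return set_memory_array_wb(addrs, count);	/* pre-existing */
}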
arch/x86/mm/pageattr.c

@@ -997,7 +997,8 @@ int set_memory_uc(unsigned long addr, int numpages)
 }
 EXPORT_SYMBOL(set_memory_uc);
 
-int set_memory_array_uc(unsigned long *addr, int addrinarray)
+int _set_memory_array(unsigned long *addr, int addrinarray,
+		    unsigned long new_type)
 {
 	int i, j;
 	int ret;
@@ -1007,13 +1008,19 @@ int set_memory_array_uc(unsigned long *addr, int addrinarray)
 	 */
 	for (i = 0; i < addrinarray; i++) {
 		ret = reserve_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE,
-					_PAGE_CACHE_UC_MINUS, NULL);
+					new_type, NULL);
 		if (ret)
 			goto out_free;
 	}
 
 	ret = change_page_attr_set(addr, addrinarray,
 				    __pgprot(_PAGE_CACHE_UC_MINUS), 1);
+
+	if (!ret && new_type == _PAGE_CACHE_WC)
+		ret = change_page_attr_set_clr(addr, addrinarray,
+					       __pgprot(_PAGE_CACHE_WC),
+					       __pgprot(_PAGE_CACHE_MASK),
+					       0, CPA_ARRAY, NULL);
 	if (ret)
 		goto out_free;
@@ -1025,8 +1032,19 @@ int set_memory_array_uc(unsigned long *addr, int addrinarray)
 
 	return ret;
 }
+
+int set_memory_array_uc(unsigned long *addr, int addrinarray)
+{
+	return _set_memory_array(addr, addrinarray, _PAGE_CACHE_UC_MINUS);
+}
 EXPORT_SYMBOL(set_memory_array_uc);
 
+int set_memory_array_wc(unsigned long *addr, int addrinarray)
+{
+	return _set_memory_array(addr, addrinarray, _PAGE_CACHE_WC);
+}
+EXPORT_SYMBOL(set_memory_array_wc);
+
 int _set_memory_wc(unsigned long addr, int numpages)
 {
 	int ret;
@@ -1153,26 +1171,34 @@ int set_pages_uc(struct page *page, int numpages)
 }
 EXPORT_SYMBOL(set_pages_uc);
 
-int set_pages_array_uc(struct page **pages, int addrinarray)
+static int _set_pages_array(struct page **pages, int addrinarray,
+		unsigned long new_type)
 {
 	unsigned long start;
 	unsigned long end;
 	int i;
 	int free_idx;
+	int ret;
 
 	for (i = 0; i < addrinarray; i++) {
 		if (PageHighMem(pages[i]))
 			continue;
 		start = page_to_pfn(pages[i]) << PAGE_SHIFT;
 		end = start + PAGE_SIZE;
-		if (reserve_memtype(start, end, _PAGE_CACHE_UC_MINUS, NULL))
+		if (reserve_memtype(start, end, new_type, NULL))
 			goto err_out;
 	}
 
-	if (cpa_set_pages_array(pages, addrinarray,
-			__pgprot(_PAGE_CACHE_UC_MINUS)) == 0) {
-		return 0; /* Success */
-	}
+	ret = cpa_set_pages_array(pages, addrinarray,
+			__pgprot(_PAGE_CACHE_UC_MINUS));
+	if (!ret && new_type == _PAGE_CACHE_WC)
+		ret = change_page_attr_set_clr(NULL, addrinarray,
+					       __pgprot(_PAGE_CACHE_WC),
+					       __pgprot(_PAGE_CACHE_MASK),
+					       0, CPA_PAGES_ARRAY, pages);
+	if (ret)
+		goto err_out;
+	return 0; /* Success */
 err_out:
 	free_idx = i;
 	for (i = 0; i < free_idx; i++) {
@@ -1184,8 +1210,19 @@ int set_pages_array_uc(struct page **pages, int addrinarray)
 	}
 	return -EINVAL;
 }
+
+int set_pages_array_uc(struct page **pages, int addrinarray)
+{
+	return _set_pages_array(pages, addrinarray, _PAGE_CACHE_UC_MINUS);
+}
 EXPORT_SYMBOL(set_pages_array_uc);
 
+int set_pages_array_wc(struct page **pages, int addrinarray)
+{
+	return _set_pages_array(pages, addrinarray, _PAGE_CACHE_WC);
+}
+EXPORT_SYMBOL(set_pages_array_wc);
+
 int set_pages_wb(struct page *page, int numpages)
 {
 	unsigned long addr = (unsigned long)page_address(page);
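The struct page based variants above are what the TTM pool will call. A sketch under the same caveat; only set_pages_array_wc()/set_pages_array_wb() are real, the wrapper is hypothetical:

#include <linux/mm.h>
#include <asm/cacheflush.h>

/*
 * Hypothetical caller: mark an array of pages write-combined for device
 * use, then put them back to write-back before returning them to the
 * page allocator.  Note the memtype reservation loop in
 * _set_pages_array() above skips highmem pages.
 */
static int example_pages_wc(struct page **pages, int count)
{
	int ret;

	ret = set_pages_array_wc(pages, count);
	if (ret)
		return ret;

	/* ... pages are handed to the device as write-combined ... */

	set_pages_array_wb(pages, count);
	return 0;
}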
drivers/gpu/drm/nouveau/nouveau_debugfs.c

@@ -33,6 +33,8 @@
 #include "drmP.h"
 #include "nouveau_drv.h"
 
+#include <ttm/ttm_page_alloc.h>
+
 static int
 nouveau_debugfs_channel_info(struct seq_file *m, void *data)
 {
@@ -159,6 +161,7 @@ static struct drm_info_list nouveau_debugfs_list[] = {
 	{ "chipset", nouveau_debugfs_chipset_info, 0, NULL },
 	{ "memory", nouveau_debugfs_memory_info, 0, NULL },
 	{ "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
+	{ "ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL },
 };
 
 #define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
drivers/gpu/drm/radeon/radeon_ttm.c

@@ -33,6 +33,7 @@
 #include <ttm/ttm_bo_driver.h>
 #include <ttm/ttm_placement.h>
 #include <ttm/ttm_module.h>
+#include <ttm/ttm_page_alloc.h>
 #include <drm/drmP.h>
 #include <drm/radeon_drm.h>
 #include <linux/seq_file.h>
@@ -745,8 +746,8 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data)
 static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
 {
 #if defined(CONFIG_DEBUG_FS)
-	static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES];
-	static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES][32];
+	static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+1];
+	static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+1][32];
 	unsigned i;
 
 	for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
@@ -763,7 +764,13 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
 		radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].manager;
 	}
-	return radeon_debugfs_add_files(rdev, radeon_mem_types_list, RADEON_DEBUGFS_MEM_TYPES);
+	/* Add ttm page pool to debugfs */
+	sprintf(radeon_mem_types_names[i], "ttm_page_pool");
+	radeon_mem_types_list[i].name = radeon_mem_types_names[i];
+	radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
+	radeon_mem_types_list[i].driver_features = 0;
+	radeon_mem_types_list[i].data = NULL;
+	return radeon_debugfs_add_files(rdev, radeon_mem_types_list, RADEON_DEBUGFS_MEM_TYPES+1);
 
 #endif
 	return 0;
drivers/gpu/drm/ttm/Makefile

@@ -4,6 +4,6 @@
 ccflags-y := -Iinclude/drm
 ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
 	ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o \
-	ttm_object.o ttm_lock.o ttm_execbuf_util.o
+	ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o
 
 obj-$(CONFIG_DRM_TTM) += ttm.o
drivers/gpu/drm/ttm/ttm_memory.c

@@ -27,6 +27,7 @@
 #include "ttm/ttm_memory.h"
 #include "ttm/ttm_module.h"
+#include "ttm/ttm_page_alloc.h"
 #include <linux/spinlock.h>
 #include <linux/sched.h>
 #include <linux/wait.h>
@@ -393,6 +394,7 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
 		       "Zone %7s: Available graphics memory: %llu kiB.\n",
 		       zone->name, (unsigned long long) zone->max_mem >> 10);
 	}
+	ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
 	return 0;
 out_no_zone:
 	ttm_mem_global_release(glob);
@@ -405,6 +407,9 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
 	unsigned int i;
 	struct ttm_mem_zone *zone;
 
+	/* let the page allocator first stop the shrink work. */
+	ttm_page_alloc_fini();
+
 	flush_workqueue(glob->swap_queue);
 	destroy_workqueue(glob->swap_queue);
 	glob->swap_queue = NULL;
@@ -412,7 +417,7 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
 		zone = glob->zones[i];
 		kobject_del(&zone->kobj);
 		kobject_put(&zone->kobj);
 	}
 	kobject_del(&glob->kobj);
 	kobject_put(&glob->kobj);
 }
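For scale, the cap passed to ttm_page_alloc_init() above is half of the kernel zone, expressed in pages. An illustrative calculation; the 2 GiB zone size is an assumption, not from the patch:

/*
 * max_pages = zone_kernel->max_mem / (2 * PAGE_SIZE)
 * e.g. with max_mem = 2 GiB and PAGE_SIZE = 4 KiB:
 *   (2UL << 30) / (2 * 4096) = 262144 pages, i.e. at most 1 GiB of pooled pages.
 */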
drivers/gpu/drm/ttm/ttm_page_alloc.c (new file): diff collapsed in this view.
drivers/gpu/drm/ttm/ttm_tt.c

@@ -39,6 +39,7 @@
 #include "ttm/ttm_module.h"
 #include "ttm/ttm_bo_driver.h"
 #include "ttm/ttm_placement.h"
+#include "ttm/ttm_page_alloc.h"
 
 static int ttm_tt_swapin(struct ttm_tt *ttm);
@@ -56,21 +57,6 @@ static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
 	ttm->pages = NULL;
 }
 
-static struct page *ttm_tt_alloc_page(unsigned page_flags)
-{
-	gfp_t gfp_flags = GFP_USER;
-
-	if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
-		gfp_flags |= __GFP_ZERO;
-
-	if (page_flags & TTM_PAGE_FLAG_DMA32)
-		gfp_flags |= __GFP_DMA32;
-	else
-		gfp_flags |= __GFP_HIGHMEM;
-
-	return alloc_page(gfp_flags);
-}
-
 static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
 {
 	int write;
@@ -111,15 +97,21 @@ static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
 static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
 {
 	struct page *p;
+	struct list_head h;
 	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
 	int ret;
 
 	while (NULL == (p = ttm->pages[index])) {
-		p = ttm_tt_alloc_page(ttm->page_flags);
-		if (!p)
+
+		INIT_LIST_HEAD(&h);
+
+		ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1);
+
+		if (ret != 0)
 			return NULL;
 
+		p = list_first_entry(&h, struct page, lru);
+
 		ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
 		if (unlikely(ret != 0))
 			goto out_err;
@@ -228,10 +220,10 @@ static int ttm_tt_set_caching(struct ttm_tt *ttm,
 	if (ttm->caching_state == c_state)
 		return 0;
 
-	if (c_state != tt_cached) {
-		ret = ttm_tt_populate(ttm);
-		if (unlikely(ret != 0))
-			return ret;
+	if (ttm->state == tt_unpopulated) {
+		/* Change caching but don't populate */
+		ttm->caching_state = c_state;
+		return 0;
 	}
 
 	if (ttm->caching_state == tt_cached)
@@ -282,13 +274,17 @@ EXPORT_SYMBOL(ttm_tt_set_placement_caching);
 static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
 {
 	int i;
+	unsigned count = 0;
+	struct list_head h;
 	struct page *cur_page;
 	struct ttm_backend *be = ttm->be;
 
+	INIT_LIST_HEAD(&h);
+
 	if (be)
 		be->func->clear(be);
+	(void)ttm_tt_set_caching(ttm, tt_cached);
 	for (i = 0; i < ttm->num_pages; ++i) {
 		cur_page = ttm->pages[i];
 		ttm->pages[i] = NULL;
 		if (cur_page) {
@@ -298,9 +294,11 @@ static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
 				       "Leaking pages.\n");
 			ttm_mem_global_free_page(ttm->glob->mem_glob,
 						 cur_page);
-			__free_page(cur_page);
+			list_add(&cur_page->lru, &h);
+			count++;
 		}
 	}
+	ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state);
 	ttm->state = tt_unpopulated;
 	ttm->first_himem_page = ttm->num_pages;
 	ttm->last_lomem_page = -1;
include/drm/ttm/ttm_page_alloc.h (new file)

/*
* Copyright (c) Red Hat Inc.
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie <airlied@redhat.com>
* Jerome Glisse <jglisse@redhat.com>
*/
#ifndef TTM_PAGE_ALLOC
#define TTM_PAGE_ALLOC
#include "ttm_bo_driver.h"
#include "ttm_memory.h"
/**
* Get count number of pages from the pool and add them to the pages list.
*
* @pages: head of an empty linked list where the allocated pages are placed.
* @flags: ttm flags for page allocation.
* @cstate: ttm caching state for the page.
* @count: number of pages to allocate.
*/
int ttm_get_pages(struct list_head *pages,
int flags,
enum ttm_caching_state cstate,
unsigned count);
/**
* Put a linked list of pages back to the pool.
*
* @pages: list of pages to free.
* @page_count: number of pages in the list. Zero can be passed for unknown
* count.
* @flags: ttm flags for page allocation.
* @cstate: ttm caching state.
*/
void ttm_put_pages(struct list_head *pages,
unsigned page_count,
int flags,
enum ttm_caching_state cstate);
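/*
 * Usage sketch (illustrative, not part of this header): this mirrors the
 * pattern used by the ttm_tt.c hunks above.  Real callers pass
 * ttm->page_flags and ttm->caching_state for flags/cstate.
 *
 *	struct list_head h;
 *	struct page *p;
 *
 *	INIT_LIST_HEAD(&h);
 *	if (ttm_get_pages(&h, flags, cstate, 1) != 0)
 *		return NULL;
 *	p = list_first_entry(&h, struct page, lru);
 *
 *	... use the page, then hand it back through the same list interface ...
 *
 *	INIT_LIST_HEAD(&h);
 *	list_add(&p->lru, &h);
 *	ttm_put_pages(&h, 1, flags, cstate);
 */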
/**
* Initialize pool allocator.
*
* Pool allocator is internally reference counted so it can be initialized
* multiple times but ttm_page_alloc_fini has to be called same number of
* times.
*/
int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
/**
* Free pool allocator.
*/
void ttm_page_alloc_fini(void);
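/*
 * Pairing sketch (illustrative): every successful ttm_page_alloc_init()
 * must be balanced by one ttm_page_alloc_fini(), as ttm_mem_global_init()
 * and ttm_mem_global_release() in the ttm_memory.c hunks above do:
 *
 *	ttm_page_alloc_init(glob, glob->zone_kernel->max_mem / (2 * PAGE_SIZE));
 *	...
 *	ttm_page_alloc_fini();
 */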
/**
* Output the state of pools to debugfs file
*/
extern int ttm_page_alloc_debugfs(struct seq_file *m, void *data);
#endif