Commit 78af329a authored by David Herrmann, committed by Dave Airlie

drm/ttm: replace drm_mm_pre_get() by direct alloc

Instead of calling drm_mm_pre_get() in a retry loop, we now preallocate the
node and insert it directly under the lock. The semantics are exactly the
same, and there is no reason to keep the racy pre-allocation scheme.

Note that ttm_bo_man_get_node() does not run in atomic context, and nouveau
already uses a GFP_KERNEL allocation in nouveau_gart_manager_new() in
nouveau/nouveau_ttm.c, so we can do the same in ttm_bo_man_get_node().
Signed-off-by: David Herrmann <dh.herrmann@gmail.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Dave Airlie <airlied@redhat.com>
parent 28ec711c
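
For context, here is a condensed sketch of the retry pattern this patch removes. The helper below is hypothetical, but the drm_mm calls are the pre-patch ones visible in the first hunk: drm_mm_pre_get() fills a cache of nodes shared by all users of the drm_mm, another thread can drain that cache between the pre-get and drm_mm_get_block_atomic_range(), and the loop retries whenever that race is lost.

#include <linux/spinlock.h>
#include <drm/drm_mm.h>

/* Hypothetical condensation of the pre-patch allocation path. */
static struct drm_mm_node *racy_prealloc_get(struct drm_mm *mm,
                                             spinlock_t *lock,
                                             unsigned long num_pages,
                                             unsigned alignment,
                                             unsigned long fpfn,
                                             unsigned long lpfn)
{
        struct drm_mm_node *hole, *node;

        do {
                /* Refill the shared node cache; may sleep (GFP_KERNEL). */
                if (drm_mm_pre_get(mm))
                        return NULL;    /* out of memory */

                spin_lock(lock);
                hole = drm_mm_search_free_in_range(mm, num_pages, alignment,
                                                   fpfn, lpfn,
                                                   DRM_MM_SEARCH_BEST);
                if (!hole) {
                        spin_unlock(lock);
                        return NULL;    /* no free hole in the range */
                }
                /* Consumes a cached node; NULL if the cache was drained. */
                node = drm_mm_get_block_atomic_range(hole, num_pages,
                                                     alignment, fpfn, lpfn);
                spin_unlock(lock);
        } while (!node);        /* lost the race: refill and retry */

        return node;
}

The patch collapses all of this into a single kzalloc() before the lock plus one insertion call, which is safe precisely because this path never runs in atomic context.
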
@@ -61,29 +61,25 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
         lpfn = placement->lpfn;
         if (!lpfn)
                 lpfn = man->size;
-        do {
-                ret = drm_mm_pre_get(mm);
-                if (unlikely(ret))
-                        return ret;
+        node = kzalloc(sizeof(*node), GFP_KERNEL);
+        if (!node)
+                return -ENOMEM;
 
-                spin_lock(&rman->lock);
-                node = drm_mm_search_free_in_range(mm,
-                                        mem->num_pages, mem->page_alignment,
-                                        placement->fpfn, lpfn,
-                                        DRM_MM_SEARCH_BEST);
-                if (unlikely(node == NULL)) {
-                        spin_unlock(&rman->lock);
-                        return 0;
-                }
-                node = drm_mm_get_block_atomic_range(node, mem->num_pages,
-                                                     mem->page_alignment,
-                                                     placement->fpfn,
-                                                     lpfn);
-                spin_unlock(&rman->lock);
-        } while (node == NULL);
+        spin_lock(&rman->lock);
+        ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
+                                          mem->page_alignment,
+                                          placement->fpfn, lpfn,
+                                          DRM_MM_SEARCH_BEST);
+        spin_unlock(&rman->lock);
+
+        if (unlikely(ret)) {
+                kfree(node);
+        } else {
+                mem->mm_node = node;
+                mem->start = node->start;
+        }
 
         return 0;
 }
@@ -94,8 +90,10 @@ static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
         if (mem->mm_node) {
                 spin_lock(&rman->lock);
-                drm_mm_put_block(mem->mm_node);
+                drm_mm_remove_node(mem->mm_node);
                 spin_unlock(&rman->lock);
+
+                kfree(mem->mm_node);
                 mem->mm_node = NULL;
         }
 }
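
As an illustration of how the two hunks pair up after this change, the following self-contained sketch (not from the patch; the size, alignment, and range values are made up) walks a node through the post-patch lifecycle: kzalloc(), drm_mm_insert_node_in_range(), then drm_mm_remove_node() before kfree(), mirroring ttm_bo_man_get_node()/ttm_bo_man_put_node() above.

#include <linux/slab.h>
#include <drm/drm_mm.h>

/* Illustrative round trip through the post-patch allocation scheme. */
static int demo_range_alloc(struct drm_mm *mm)
{
        struct drm_mm_node *node;
        int ret;

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (!node)
                return -ENOMEM;

        /* 16 pages, no alignment constraint, placed within [0, 256). */
        ret = drm_mm_insert_node_in_range(mm, node, 16, 0, 0, 256,
                                          DRM_MM_SEARCH_BEST);
        if (ret) {
                kfree(node);    /* insertion failed: nothing to remove */
                return ret;
        }

        /* ... node->start is the allocated offset ... */

        drm_mm_remove_node(node);       /* must come before kfree() */
        kfree(node);
        return 0;
}

Note the ordering on the release side: the node has to be taken out of the drm_mm before its memory is freed, which is exactly why ttm_bo_man_put_node() now calls drm_mm_remove_node() under the lock and kfree() after dropping it.
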