Commit db5c8e29 authored by Ben Skeggs's avatar Ben Skeggs

drm/nv50-nvc0: restrict memtype to those specified at creation time

Upcoming patches are going to enable full support for buffers that keep
a constant GPU virtual address whenever they're validated for use by
the GPU.

In order for this to work properly while keeping support for large pages,
we need to know if it's ever going to be possible for a buffer to end
up in GART, and if so, disable large pages for the buffer's VMA.

This is a new restriction that's not present in earlier kernels, but
should not break userspace as the current code never attempts to validate
buffers into a memtype other than it was created with.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent 6ba9a683
...@@ -54,8 +54,8 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo) ...@@ -54,8 +54,8 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
} }
static void static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, int *size, nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
int *page_shift) int *align, int *size, int *page_shift)
{ {
struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
...@@ -80,7 +80,7 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, int *size, ...@@ -80,7 +80,7 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, int *size,
} }
} else { } else {
if (likely(dev_priv->chan_vm)) { if (likely(dev_priv->chan_vm)) {
if (*size > 256 * 1024) if (!(flags & TTM_PL_FLAG_TT) && *size > 256 * 1024)
*page_shift = dev_priv->chan_vm->lpg_shift; *page_shift = dev_priv->chan_vm->lpg_shift;
else else
*page_shift = dev_priv->chan_vm->spg_shift; *page_shift = dev_priv->chan_vm->spg_shift;
...@@ -113,7 +113,7 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan, ...@@ -113,7 +113,7 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
nvbo->tile_flags = tile_flags; nvbo->tile_flags = tile_flags;
nvbo->bo.bdev = &dev_priv->ttm.bdev; nvbo->bo.bdev = &dev_priv->ttm.bdev;
nouveau_bo_fixup_align(nvbo, &align, &size, &page_shift); nouveau_bo_fixup_align(nvbo, flags, &align, &size, &page_shift);
align >>= PAGE_SHIFT; align >>= PAGE_SHIFT;
if (dev_priv->chan_vm) { if (dev_priv->chan_vm) {
......
...@@ -90,6 +90,7 @@ struct nouveau_tile_reg { ...@@ -90,6 +90,7 @@ struct nouveau_tile_reg {
struct nouveau_bo { struct nouveau_bo {
struct ttm_buffer_object bo; struct ttm_buffer_object bo;
struct ttm_placement placement; struct ttm_placement placement;
u32 valid_domains;
u32 placements[3]; u32 placements[3];
u32 busy_placements[3]; u32 busy_placements[3];
struct ttm_bo_kmap_obj kmap; struct ttm_bo_kmap_obj kmap;
......
...@@ -64,6 +64,7 @@ nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan, ...@@ -64,6 +64,7 @@ nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan,
int size, int align, uint32_t domain, uint32_t tile_mode, int size, int align, uint32_t domain, uint32_t tile_mode,
uint32_t tile_flags, struct nouveau_bo **pnvbo) uint32_t tile_flags, struct nouveau_bo **pnvbo)
{ {
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_bo *nvbo; struct nouveau_bo *nvbo;
u32 flags = 0; u32 flags = 0;
int ret; int ret;
...@@ -81,6 +82,15 @@ nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan, ...@@ -81,6 +82,15 @@ nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan,
return ret; return ret;
nvbo = *pnvbo; nvbo = *pnvbo;
/* we restrict allowed domains on nv50+ to only the types
 * that were requested at creation time. not possible on
* earlier chips without busting the ABI.
*/
nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
NOUVEAU_GEM_DOMAIN_GART;
if (dev_priv->card_type >= NV_50)
nvbo->valid_domains &= domain;
nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size); nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
if (!nvbo->gem) { if (!nvbo->gem) {
nouveau_bo_ref(NULL, pnvbo); nouveau_bo_ref(NULL, pnvbo);
...@@ -159,7 +169,7 @@ nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains, ...@@ -159,7 +169,7 @@ nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
{ {
struct nouveau_bo *nvbo = gem->driver_private; struct nouveau_bo *nvbo = gem->driver_private;
struct ttm_buffer_object *bo = &nvbo->bo; struct ttm_buffer_object *bo = &nvbo->bo;
uint32_t domains = valid_domains & uint32_t domains = valid_domains & nvbo->valid_domains &
(write_domains ? write_domains : read_domains); (write_domains ? write_domains : read_domains);
uint32_t pref_flags = 0, valid_flags = 0; uint32_t pref_flags = 0, valid_flags = 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment