Commit 79ab9370 authored by Mika Kuoppala, committed by Daniel Vetter

drm/i915/gtt: Move scratch_pd and scratch_pt into vm struct

Scratch page is part of struct i915_address_space. Move other
scratch entities into the same struct. This is a preparatory patch
for having only one instance of each scratch_pt/pd.

v2: make commit msg more readable
Signed-off-by: Mika Kuoppala <mika.kuoppala@intel.com>
Reviewed-by: Michel Thierry <michel.thierry@intel.com> (v1)
[danvet: Bikeshed summary to avoid confusion with vmas.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent fe36f55d
...@@ -612,12 +612,9 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm, ...@@ -612,12 +612,9 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
static void gen8_initialize_pd(struct i915_address_space *vm, static void gen8_initialize_pd(struct i915_address_space *vm,
struct i915_page_directory *pd) struct i915_page_directory *pd)
{ {
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
gen8_pde_t scratch_pde; gen8_pde_t scratch_pde;
scratch_pde = gen8_pde_encode(px_dma(ppgtt->scratch_pt), scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC);
I915_CACHE_LLC);
fill_px(vm->dev, pd, scratch_pde); fill_px(vm->dev, pd, scratch_pde);
} }
...@@ -652,8 +649,8 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm) ...@@ -652,8 +649,8 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
free_pd(ppgtt->base.dev, ppgtt->pdp.page_directory[i]); free_pd(ppgtt->base.dev, ppgtt->pdp.page_directory[i]);
} }
free_pd(ppgtt->base.dev, ppgtt->scratch_pd); free_pd(vm->dev, vm->scratch_pd);
free_pt(ppgtt->base.dev, ppgtt->scratch_pt); free_pt(vm->dev, vm->scratch_pt);
} }
/** /**
...@@ -689,7 +686,7 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_hw_ppgtt *ppgtt, ...@@ -689,7 +686,7 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_hw_ppgtt *ppgtt,
/* Don't reallocate page tables */ /* Don't reallocate page tables */
if (pt) { if (pt) {
/* Scratch is never allocated this way */ /* Scratch is never allocated this way */
WARN_ON(pt == ppgtt->scratch_pt); WARN_ON(pt == ppgtt->base.scratch_pt);
continue; continue;
} }
...@@ -940,16 +937,16 @@ static int gen8_alloc_va_range(struct i915_address_space *vm, ...@@ -940,16 +937,16 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
*/ */
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt) static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{ {
ppgtt->scratch_pt = alloc_pt(ppgtt->base.dev); ppgtt->base.scratch_pt = alloc_pt(ppgtt->base.dev);
if (IS_ERR(ppgtt->scratch_pt)) if (IS_ERR(ppgtt->base.scratch_pt))
return PTR_ERR(ppgtt->scratch_pt); return PTR_ERR(ppgtt->base.scratch_pt);
ppgtt->scratch_pd = alloc_pd(ppgtt->base.dev); ppgtt->base.scratch_pd = alloc_pd(ppgtt->base.dev);
if (IS_ERR(ppgtt->scratch_pd)) if (IS_ERR(ppgtt->base.scratch_pd))
return PTR_ERR(ppgtt->scratch_pd); return PTR_ERR(ppgtt->base.scratch_pd);
gen8_initialize_pt(&ppgtt->base, ppgtt->scratch_pt); gen8_initialize_pt(&ppgtt->base, ppgtt->base.scratch_pt);
gen8_initialize_pd(&ppgtt->base, ppgtt->scratch_pd); gen8_initialize_pd(&ppgtt->base, ppgtt->base.scratch_pd);
ppgtt->base.start = 0; ppgtt->base.start = 0;
ppgtt->base.total = 1ULL << 32; ppgtt->base.total = 1ULL << 32;
...@@ -981,7 +978,8 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m) ...@@ -981,7 +978,8 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
uint32_t pte, pde, temp; uint32_t pte, pde, temp;
uint32_t start = ppgtt->base.start, length = ppgtt->base.total; uint32_t start = ppgtt->base.start, length = ppgtt->base.total;
scratch_pte = vm->pte_encode(px_dma(vm->scratch_page), I915_CACHE_LLC, true, 0); scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
I915_CACHE_LLC, true, 0);
gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde) { gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde) {
u32 expected; u32 expected;
...@@ -1314,7 +1312,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm, ...@@ -1314,7 +1312,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
* tables. * tables.
*/ */
gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) { gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
if (pt != ppgtt->scratch_pt) { if (pt != vm->scratch_pt) {
WARN_ON(bitmap_empty(pt->used_ptes, GEN6_PTES)); WARN_ON(bitmap_empty(pt->used_ptes, GEN6_PTES));
continue; continue;
} }
...@@ -1369,7 +1367,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm, ...@@ -1369,7 +1367,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
for_each_set_bit(pde, new_page_tables, I915_PDES) { for_each_set_bit(pde, new_page_tables, I915_PDES) {
struct i915_page_table *pt = ppgtt->pd.page_table[pde]; struct i915_page_table *pt = ppgtt->pd.page_table[pde];
ppgtt->pd.page_table[pde] = ppgtt->scratch_pt; ppgtt->pd.page_table[pde] = vm->scratch_pt;
free_pt(vm->dev, pt); free_pt(vm->dev, pt);
} }
...@@ -1384,15 +1382,14 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm) ...@@ -1384,15 +1382,14 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
struct i915_page_table *pt; struct i915_page_table *pt;
uint32_t pde; uint32_t pde;
drm_mm_remove_node(&ppgtt->node); drm_mm_remove_node(&ppgtt->node);
gen6_for_all_pdes(pt, ppgtt, pde) { gen6_for_all_pdes(pt, ppgtt, pde) {
if (pt != ppgtt->scratch_pt) if (pt != vm->scratch_pt)
free_pt(ppgtt->base.dev, pt); free_pt(ppgtt->base.dev, pt);
} }
free_pt(ppgtt->base.dev, ppgtt->scratch_pt); free_pt(vm->dev, vm->scratch_pt);
} }
static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt) static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
...@@ -1407,11 +1404,11 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt) ...@@ -1407,11 +1404,11 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
* size. We allocate at the top of the GTT to avoid fragmentation. * size. We allocate at the top of the GTT to avoid fragmentation.
*/ */
BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm)); BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm));
ppgtt->scratch_pt = alloc_pt(ppgtt->base.dev); ppgtt->base.scratch_pt = alloc_pt(ppgtt->base.dev);
if (IS_ERR(ppgtt->scratch_pt)) if (IS_ERR(ppgtt->base.scratch_pt))
return PTR_ERR(ppgtt->scratch_pt); return PTR_ERR(ppgtt->base.scratch_pt);
gen6_initialize_pt(&ppgtt->base, ppgtt->scratch_pt); gen6_initialize_pt(&ppgtt->base, ppgtt->base.scratch_pt);
alloc: alloc:
ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm, ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
...@@ -1442,7 +1439,7 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt) ...@@ -1442,7 +1439,7 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
return 0; return 0;
err_out: err_out:
free_pt(ppgtt->base.dev, ppgtt->scratch_pt); free_pt(ppgtt->base.dev, ppgtt->base.scratch_pt);
return ret; return ret;
} }
...@@ -1458,7 +1455,7 @@ static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt, ...@@ -1458,7 +1455,7 @@ static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
uint32_t pde, temp; uint32_t pde, temp;
gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde) gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde)
ppgtt->pd.page_table[pde] = ppgtt->scratch_pt; ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
} }
static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
......
...@@ -254,6 +254,8 @@ struct i915_address_space { ...@@ -254,6 +254,8 @@ struct i915_address_space {
u64 total; /* size addr space maps (ex. 2GB for ggtt) */ u64 total; /* size addr space maps (ex. 2GB for ggtt) */
struct i915_page_scratch *scratch_page; struct i915_page_scratch *scratch_page;
struct i915_page_table *scratch_pt;
struct i915_page_directory *scratch_pd;
/** /**
* List of objects currently involved in rendering. * List of objects currently involved in rendering.
...@@ -343,9 +345,6 @@ struct i915_hw_ppgtt { ...@@ -343,9 +345,6 @@ struct i915_hw_ppgtt {
struct i915_page_directory pd; struct i915_page_directory pd;
}; };
struct i915_page_table *scratch_pt;
struct i915_page_directory *scratch_pd;
struct drm_i915_file_private *file_priv; struct drm_i915_file_private *file_priv;
gen6_pte_t __iomem *pd_addr; gen6_pte_t __iomem *pd_addr;
...@@ -487,7 +486,7 @@ i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n) ...@@ -487,7 +486,7 @@ i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n)
{ {
return test_bit(n, ppgtt->pdp.used_pdpes) ? return test_bit(n, ppgtt->pdp.used_pdpes) ?
px_dma(ppgtt->pdp.page_directory[n]) : px_dma(ppgtt->pdp.page_directory[n]) :
px_dma(ppgtt->scratch_pd); px_dma(ppgtt->base.scratch_pd);
} }
int i915_gem_gtt_init(struct drm_device *dev); int i915_gem_gtt_init(struct drm_device *dev);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment