Commit 73eeea53 authored by Mika Kuoppala, committed by Daniel Vetter

drm/i915/gtt: Introduce fill_page_dma()

When we set up page directories and tables, we point the entries
to the next level scratch structure. Make this generic by
introducing fill_page_dma(), which maps, fills and flushes the
page. We also need a 32 bit variant for legacy gens.
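
For illustration only (not part of the patch): the 32 bit variant
relies on replicating the value into both halves of a 64 bit word,
so each 64 bit store writes two 32 bit entries at once. A
stand-alone user space sketch of that doubling:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		const uint32_t val32 = 0x12345678; /* example 32 bit PTE */
		uint64_t v = val32;

		v = v << 32 | val32; /* same doubling as fill_page_dma_32() */

		assert((uint32_t)v == val32);         /* low entry */
		assert((uint32_t)(v >> 32) == val32); /* high entry */
		return 0;
	}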

v2: Fix flushes and handle valleyview (Ville)
v3: Now really fix flushes (Michel, Ville)
Reviewed-by: Michel Thierry <michel.thierry@intel.com>
Signed-off-by: Mika Kuoppala <mika.kuoppala@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent cee30c54
@@ -330,6 +330,34 @@ static void cleanup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
 	memset(p, 0, sizeof(*p));
 }
 
+static void fill_page_dma(struct drm_device *dev, struct i915_page_dma *p,
+			  const uint64_t val)
+{
+	int i;
+	uint64_t * const vaddr = kmap_atomic(p->page);
+
+	for (i = 0; i < 512; i++)
+		vaddr[i] = val;
+
+	/* There are only few exceptions for gen >=6. chv and bxt.
+	 * And we are not sure about the latter so play safe for now.
+	 */
+	if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
+		drm_clflush_virt_range(vaddr, PAGE_SIZE);
+
+	kunmap_atomic(vaddr);
+}
+
+static void fill_page_dma_32(struct drm_device *dev, struct i915_page_dma *p,
+			     const uint32_t val32)
+{
+	uint64_t v = val32;
+
+	v = v << 32 | val32;
+
+	fill_page_dma(dev, p, v);
+}
+
 static void free_pt(struct drm_device *dev, struct i915_page_table *pt)
 {
 	cleanup_page_dma(dev, &pt->base);
@@ -340,19 +368,11 @@ static void free_pt(struct drm_device *dev, struct i915_page_table *pt)
 static void gen8_initialize_pt(struct i915_address_space *vm,
 			       struct i915_page_table *pt)
 {
-	gen8_pte_t *pt_vaddr, scratch_pte;
-	int i;
-
-	pt_vaddr = kmap_atomic(pt->base.page);
-	scratch_pte = gen8_pte_encode(vm->scratch.addr,
-				      I915_CACHE_LLC, true);
+	gen8_pte_t scratch_pte;
 
-	for (i = 0; i < GEN8_PTES; i++)
-		pt_vaddr[i] = scratch_pte;
+	scratch_pte = gen8_pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);
 
-	if (!HAS_LLC(vm->dev))
-		drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
-	kunmap_atomic(pt_vaddr);
+	fill_page_dma(vm->dev, &pt->base, scratch_pte);
 }
 
 static struct i915_page_table *alloc_pt(struct drm_device *dev)
@@ -586,20 +606,13 @@ static void gen8_initialize_pd(struct i915_address_space *vm,
 			       struct i915_page_directory *pd)
 {
 	struct i915_hw_ppgtt *ppgtt =
 		container_of(vm, struct i915_hw_ppgtt, base);
-	gen8_pde_t *page_directory;
-	struct i915_page_table *pt;
-	int i;
+	gen8_pde_t scratch_pde;
 
-	page_directory = kmap_atomic(pd->base.page);
-	pt = ppgtt->scratch_pt;
-	for (i = 0; i < I915_PDES; i++)
-		/* Map the PDE to the page table */
-		__gen8_do_map_pt(page_directory + i, pt, vm->dev);
+	scratch_pde = gen8_pde_encode(vm->dev, ppgtt->scratch_pt->base.daddr,
+				      I915_CACHE_LLC);
 
-	if (!HAS_LLC(vm->dev))
-		drm_clflush_virt_range(page_directory, PAGE_SIZE);
-	kunmap_atomic(page_directory);
+	fill_page_dma(vm->dev, &pd->base, scratch_pde);
 }
 
 static void gen8_free_page_tables(struct i915_page_directory *pd, struct drm_device *dev)
@@ -1255,22 +1268,15 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
 }
 
 static void gen6_initialize_pt(struct i915_address_space *vm,
 			       struct i915_page_table *pt)
 {
-	gen6_pte_t *pt_vaddr, scratch_pte;
-	int i;
+	gen6_pte_t scratch_pte;
 
 	WARN_ON(vm->scratch.addr == 0);
 
-	scratch_pte = vm->pte_encode(vm->scratch.addr,
-				     I915_CACHE_LLC, true, 0);
+	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
 
-	pt_vaddr = kmap_atomic(pt->base.page);
-
-	for (i = 0; i < GEN6_PTES; i++)
-		pt_vaddr[i] = scratch_pte;
-
-	kunmap_atomic(pt_vaddr);
+	fill_page_dma_32(vm->dev, &pt->base, scratch_pte);
 }
 
 static int gen6_alloc_va_range(struct i915_address_space *vm,
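
As a sanity check of the pattern above (a user space sketch, not
kernel code): a 4KiB page holds 512 64 bit words, i.e. 1024 gen6
style 32 bit PTEs, so the 512-iteration loop in fill_page_dma()
covers every entry that fill_page_dma_32() is asked to write:

	#include <assert.h>
	#include <stdint.h>
	#include <stdlib.h>

	#define PAGE_SIZE 4096 /* one GTT page */

	int main(void)
	{
		const uint32_t pte = 0xdeadbeef; /* example scratch PTE */
		const uint64_t val = (uint64_t)pte << 32 | pte;
		uint64_t *page = malloc(PAGE_SIZE);
		size_t i;

		for (i = 0; i < PAGE_SIZE / sizeof(*page); i++) /* 512 stores */
			page[i] = val;

		/* Viewed as 32 bit entries, all 1024 slots hold the PTE. */
		for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); i++)
			assert(((uint32_t *)page)[i] == pte);

		free(page);
		return 0;
	}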