Commit f20f272f authored by Mika Kuoppala, committed by Chris Wilson

drm/i915/gtt: pde entry encoding is identical

For all page directory entries, the pde encoding is
identical. Don't complicate call sites with different
versions of doing the same thing; instead, always check for
the existence of the physical page before writing the entry
into it. This further generalizes the pd so that manipulation
in call sites is identical, removing the need to handle
pdps differently for gen8.

v2: squash
v3: inc/dec with set/clear (Chris)
v4: inlines, warn, stray set_pd (Chris)

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190705215204.4559-1-chris@chris-wilson.co.uk
parent 3e27d70b
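
The shape of the change: the per-level setters (gen8_ppgtt_set_pde, gen8_ppgtt_set_pdpe, gen8_ppgtt_set_pml4e) collapse into one set_pd_entry/clear_pd_entry pair that writes the encoded entry into the directory's backing page and keeps the directory's used count in step, all under the caller's lock. A minimal userspace sketch of that pattern (simplified stand-in types, not the kernel code itself):

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdint.h>

    #define PD_ENTRIES 512 /* 512 eight-byte entries per 4KiB directory page */

    /* Simplified stand-ins for i915_page_dma / i915_page_directory. */
    struct page_dma {
            uint64_t daddr;                 /* DMA address of the backing page */
    };

    struct page_directory {
            uint64_t dma_vaddr[PD_ENTRIES]; /* stands in for the kmapped phys page */
            void *entry[PD_ENTRIES];        /* CPU-side child pointers */
            atomic_int used;                /* count of non-scratch entries */
    };

    /* One encoding for every directory level, as gen8_pde_encode() now is. */
    static uint64_t pde_encode(uint64_t daddr)
    {
            return daddr | 0x3; /* _PAGE_PRESENT | _PAGE_RW */
    }

    /* Caller holds the directory lock, as in the kernel code. */
    static void set_pd_entry(struct page_directory *pd, unsigned short pde,
                             void *child, const struct page_dma *child_dma)
    {
            assert(atomic_load(&pd->used) <= PD_ENTRIES);

            atomic_fetch_add(&pd->used, 1);
            pd->entry[pde] = child;
            pd->dma_vaddr[pde] = pde_encode(child_dma->daddr);
    }

    static void clear_pd_entry(struct page_directory *pd, unsigned short pde,
                               void *scratch, const struct page_dma *scratch_dma)
    {
            assert(atomic_load(&pd->used) > 0);

            pd->dma_vaddr[pde] = pde_encode(scratch_dma->daddr);
            pd->entry[pde] = scratch;
            atomic_fetch_sub(&pd->used, 1);
    }

Note the mirrored ordering, following __set_pd_entry/__clear_pd_entry in the diff below: set bumps the count and publishes the CPU pointer before the DMA-visible write, while clear redirects the DMA-visible entry back to scratch before dropping the count.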
@@ -211,10 +211,10 @@ static u64 gen8_pte_encode(dma_addr_t addr,
 	return pte;
 }
 
-static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
-				  const enum i915_cache_level level)
+static u64 gen8_pde_encode(const dma_addr_t addr,
+			   const enum i915_cache_level level)
 {
-	gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
+	u64 pde = _PAGE_PRESENT | _PAGE_RW;
 	pde |= addr;
 	if (level != I915_CACHE_NONE)
 		pde |= PPAT_CACHED_PDE;
@@ -223,9 +223,6 @@ static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
 	return pde;
 }
 
-#define gen8_pdpe_encode gen8_pde_encode
-#define gen8_pml4e_encode gen8_pde_encode
-
 static u64 snb_pte_encode(dma_addr_t addr,
 			  enum i915_cache_level level,
 			  u32 flags)
@@ -777,24 +774,55 @@ static void free_pd(struct i915_address_space *vm,
 	kfree(pd);
 }
 
-static void init_pd_with_page(struct i915_address_space *vm,
-			      struct i915_page_directory * const pd,
-			      struct i915_page_table *pt)
-{
-	fill_px(vm, pd, gen8_pde_encode(px_dma(pt), I915_CACHE_LLC));
-	memset_p(pd->entry, pt, 512);
+#define init_pd(vm, pd, to) {					\
+	GEM_DEBUG_BUG_ON(!pd_has_phys_page(pd));		\
+	fill_px((vm), (pd), gen8_pde_encode(px_dma(to), I915_CACHE_LLC)); \
+	memset_p((pd)->entry, (to), 512);			\
+}
+
+static inline void
+write_dma_entry(struct i915_page_dma * const pdma,
+		const unsigned short pde,
+		const u64 encoded_entry)
+{
+	u64 * const vaddr = kmap_atomic(pdma->page);
+
+	vaddr[pde] = encoded_entry;
+	kunmap_atomic(vaddr);
 }
 
-static void init_pd(struct i915_address_space *vm,
-		    struct i915_page_directory * const pd,
-		    struct i915_page_directory * const to)
+static inline void
+__set_pd_entry(struct i915_page_directory * const pd,
+	       const unsigned short pde,
+	       struct i915_page_dma * const to,
+	       u64 (*encode)(const dma_addr_t, const enum i915_cache_level))
 {
-	GEM_DEBUG_BUG_ON(!pd_has_phys_page(pd));
+	GEM_BUG_ON(atomic_read(&pd->used) > 512);
 
-	fill_px(vm, pd, gen8_pdpe_encode(px_dma(to), I915_CACHE_LLC));
-	memset_p(pd->entry, to, 512);
+	atomic_inc(&pd->used);
+	pd->entry[pde] = to;
+	write_dma_entry(px_base(pd), pde, encode(to->daddr, I915_CACHE_LLC));
+}
+
+static inline void
+__clear_pd_entry(struct i915_page_directory * const pd,
+		 const unsigned short pde,
+		 struct i915_page_dma * const to,
+		 u64 (*encode)(const dma_addr_t, const enum i915_cache_level))
+{
+	GEM_BUG_ON(atomic_read(&pd->used) == 0);
+
+	write_dma_entry(px_base(pd), pde, encode(to->daddr, I915_CACHE_LLC));
+	pd->entry[pde] = to;
+	atomic_dec(&pd->used);
 }
 
+#define set_pd_entry(pd, pde, to) \
+	__set_pd_entry((pd), (pde), px_base(to), gen8_pde_encode)
+
+#define clear_pd_entry(pd, pde, to) \
+	__clear_pd_entry((pd), (pde), px_base(to), gen8_pde_encode)
+
 /*
  * PDE TLBs are a pain to invalidate on GEN8+. When we modify
 * the page table structures, we mark them dirty so that
@@ -824,18 +852,6 @@ static bool gen8_ppgtt_clear_pt(const struct i915_address_space *vm,
 	return !atomic_sub_return(num_entries, &pt->used);
 }
 
-static void gen8_ppgtt_set_pde(struct i915_address_space *vm,
-			       struct i915_page_directory *pd,
-			       struct i915_page_table *pt,
-			       unsigned int pde)
-{
-	gen8_pde_t *vaddr;
-
-	vaddr = kmap_atomic_px(pd);
-	vaddr[pde] = gen8_pde_encode(px_dma(pt), I915_CACHE_LLC);
-	kunmap_atomic(vaddr);
-}
-
 static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
 				struct i915_page_directory *pd,
 				u64 start, u64 length)
@@ -853,11 +869,7 @@ static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
 
 		spin_lock(&pd->lock);
 		if (!atomic_read(&pt->used)) {
-			gen8_ppgtt_set_pde(vm, pd, vm->scratch_pt, pde);
-			pd->entry[pde] = vm->scratch_pt;
-
-			GEM_BUG_ON(!atomic_read(&pd->used));
-			atomic_dec(&pd->used);
+			clear_pd_entry(pd, pde, vm->scratch_pt);
 			free = true;
 		}
 		spin_unlock(&pd->lock);
@@ -868,20 +880,6 @@ static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
 	return !atomic_read(&pd->used);
 }
 
-static void gen8_ppgtt_set_pdpe(struct i915_page_directory *pdp,
-				struct i915_page_directory *pd,
-				unsigned int pdpe)
-{
-	gen8_ppgtt_pdpe_t *vaddr;
-
-	if (!pd_has_phys_page(pdp))
-		return;
-
-	vaddr = kmap_atomic_px(pdp);
-	vaddr[pdpe] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
-	kunmap_atomic(vaddr);
-}
-
 /* Removes entries from a single page dir pointer, releasing it if it's empty.
 * Caller can use the return value to update higher-level entries
 */
@@ -902,11 +900,7 @@ static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
 
 		spin_lock(&pdp->lock);
 		if (!atomic_read(&pd->used)) {
-			gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe);
-			pdp->entry[pdpe] = vm->scratch_pd;
-
-			GEM_BUG_ON(!atomic_read(&pdp->used));
-			atomic_dec(&pdp->used);
+			clear_pd_entry(pdp, pdpe, vm->scratch_pd);
 			free = true;
 		}
 		spin_unlock(&pdp->lock);
@@ -923,17 +917,6 @@ static void gen8_ppgtt_clear_3lvl(struct i915_address_space *vm,
 	gen8_ppgtt_clear_pdp(vm, i915_vm_to_ppgtt(vm)->pd, start, length);
 }
 
-static void gen8_ppgtt_set_pml4e(struct i915_page_directory *pml4,
-				 struct i915_page_directory *pdp,
-				 unsigned int pml4e)
-{
-	gen8_ppgtt_pml4e_t *vaddr;
-
-	vaddr = kmap_atomic_px(pml4);
-	vaddr[pml4e] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
-	kunmap_atomic(vaddr);
-}
-
 /* Removes entries from a single pml4.
 * This is the top-level structure in 4-level page tables used on gen8+.
 * Empty entries are always scratch pml4e.
@@ -957,8 +940,7 @@ static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm,
 
 		spin_lock(&pml4->lock);
 		if (!atomic_read(&pdp->used)) {
-			gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
-			pml4->entry[pml4e] = vm->scratch_pdp;
+			clear_pd_entry(pml4, pml4e, vm->scratch_pdp);
 			free = true;
 		}
 		spin_unlock(&pml4->lock);
@@ -1275,7 +1257,7 @@ static int gen8_init_scratch(struct i915_address_space *vm)
 	}
 
 	gen8_initialize_pt(vm, vm->scratch_pt);
-	init_pd_with_page(vm, vm->scratch_pd, vm->scratch_pt);
+	init_pd(vm, vm->scratch_pd, vm->scratch_pt);
 	if (i915_vm_is_4lvl(vm))
 		init_pd(vm, vm->scratch_pdp, vm->scratch_pd);
@@ -1298,6 +1280,11 @@ static int gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
 	enum vgt_g2v_type msg;
 	int i;
 
+	if (create)
+		atomic_inc(&ppgtt->pd->used); /* never remove */
+	else
+		atomic_dec(&ppgtt->pd->used);
+
 	if (i915_vm_is_4lvl(vm)) {
 		const u64 daddr = px_dma(ppgtt->pd);
@@ -1414,9 +1401,7 @@ static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm,
 
 		spin_lock(&pd->lock);
 		if (pd->entry[pde] == vm->scratch_pt) {
-			gen8_ppgtt_set_pde(vm, pd, pt, pde);
-			pd->entry[pde] = pt;
-			atomic_inc(&pd->used);
+			set_pd_entry(pd, pde, pt);
 		} else {
 			alloc = pt;
 			pt = pd->entry[pde];
@@ -1458,13 +1443,11 @@ static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
 				goto unwind;
 			}
 
-			init_pd_with_page(vm, pd, vm->scratch_pt);
+			init_pd(vm, pd, vm->scratch_pt);
 
 			spin_lock(&pdp->lock);
 			if (pdp->entry[pdpe] == vm->scratch_pd) {
-				gen8_ppgtt_set_pdpe(pdp, pd, pdpe);
-				pdp->entry[pdpe] = pd;
-				atomic_inc(&pdp->used);
+				set_pd_entry(pdp, pdpe, pd);
 			} else {
 				alloc = pd;
 				pd = pdp->entry[pdpe];
@@ -1490,12 +1473,9 @@ static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
 	}
 	spin_lock(&pdp->lock);
 	if (atomic_dec_and_test(&pd->used)) {
-		gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe);
-		pdp->entry[pdpe] = vm->scratch_pd;
-		GEM_BUG_ON(!atomic_read(&pdp->used));
-		atomic_dec(&pdp->used);
 		GEM_BUG_ON(alloc);
 		alloc = pd; /* defer the free to after the lock */
+		clear_pd_entry(pdp, pdpe, vm->scratch_pd);
 	}
 	spin_unlock(&pdp->lock);
 unwind:
@@ -1540,8 +1520,7 @@ static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
 
 		spin_lock(&pml4->lock);
 		if (pml4->entry[pml4e] == vm->scratch_pdp) {
-			gen8_ppgtt_set_pml4e(pml4, pdp, pml4e);
-			pml4->entry[pml4e] = pdp;
+			set_pd_entry(pml4, pml4e, pdp);
 		} else {
 			alloc = pdp;
 			pdp = pml4->entry[pml4e];
@@ -1567,10 +1546,9 @@ static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
 	}
 	spin_lock(&pml4->lock);
 	if (atomic_dec_and_test(&pdp->used)) {
-		gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
-		pml4->entry[pml4e] = vm->scratch_pdp;
 		GEM_BUG_ON(alloc);
 		alloc = pdp; /* defer the free until after the lock */
+		clear_pd_entry(pml4, pml4e, vm->scratch_pdp);
 	}
 	spin_unlock(&pml4->lock);
 unwind:
@@ -1595,20 +1573,16 @@ static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
 		if (IS_ERR(pd))
 			goto unwind;
 
-		init_pd_with_page(vm, pd, vm->scratch_pt);
-		gen8_ppgtt_set_pdpe(pdp, pd, pdpe);
-
-		atomic_inc(&pdp->used);
+		init_pd(vm, pd, vm->scratch_pt);
+		set_pd_entry(pdp, pdpe, pd);
 	}
 
-	atomic_inc(&pdp->used); /* never remove */
-
 	return 0;
 
 unwind:
 	start -= from;
 	gen8_for_each_pdpe(pd, pdp, from, start, pdpe) {
-		gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe);
+		clear_pd_entry(pdp, pdpe, vm->scratch_pd);
 		free_pd(vm, pd);
 	}
 	atomic_set(&pdp->used, 0);
...
@@ -72,9 +72,6 @@ struct intel_gt;
 
 typedef u32 gen6_pte_t;
 typedef u64 gen8_pte_t;
-typedef u64 gen8_pde_t;
-typedef u64 gen8_ppgtt_pdpe_t;
-typedef u64 gen8_ppgtt_pml4e_t;
 
 #define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)
...