Commit a3ac077b authored by Alexey Kardashevskiy, committed by Michael Ellerman

KVM: PPC: Remove redundant permission bits removal

The kvmppc_gpa_to_ua() helper itself takes care of the permission
bits in the TCE and yet every single caller removes them.

This changes the semantics of kvmppc_gpa_to_ua() so that it takes a TCE
(a GPA plus the TCE permission bits), and renames it to kvmppc_tce_to_ua()
to match, which makes the callers simpler.

This should cause no behavioural change.
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 2691f0ff
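
Why this is behaviour-preserving: the TCE permission bits (TCE_PCI_READ and
TCE_PCI_WRITE, bits 0 and 1 of a TCE per asm/tce.h) sit below PAGE_SHIFT, so
the helper's gfn shift discards them and its in-page offset mask strips them
explicitly, whether or not the caller pre-masks. Below is a minimal
user-space sketch of that equivalence, not kernel code: the constants mirror
the ppc64 definitions (64K pages assumed) and the sample TCE value is made up
for illustration.

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT	16UL			/* 64K pages, typical for ppc64 */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define TCE_PCI_WRITE	0x2UL			/* write from PCI allowed */
#define TCE_PCI_READ	0x1UL			/* read from PCI allowed */

int main(void)
{
	unsigned long tce = 0x40012340003UL;	/* GPA | TCE_PCI_READ | TCE_PCI_WRITE */
	unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);

	/* gfn: the permission bits vanish in the shift */
	assert((tce >> PAGE_SHIFT) == (gpa >> PAGE_SHIFT));

	/* in-page offset: the helper masks the permission bits itself */
	assert((tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE)) ==
	       (gpa & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE)));

	printf("gfn=0x%lx offset=0x%lx\n",
	       tce >> PAGE_SHIFT,
	       tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));
	return 0;
}

Since both derived quantities are identical for the raw TCE and the
pre-masked GPA, the callers' "tce & ~(TCE_PCI_READ | TCE_PCI_WRITE)" dance
removed below is indeed redundant.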
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -194,7 +194,7 @@ extern struct kvmppc_spapr_tce_table *kvmppc_find_table(
 		(iommu_tce_check_ioba((stt)->page_shift, (stt)->offset, \
 				(stt)->size, (ioba), (npages)) ? \
 				H_PARAMETER : H_SUCCESS)
-extern long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
+extern long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce,
 		unsigned long *ua, unsigned long **prmap);
 extern void kvmppc_tce_put(struct kvmppc_spapr_tce_table *tt,
 		unsigned long idx, unsigned long tce);
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -378,8 +378,7 @@ static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt,
 		if (iommu_tce_check_gpa(stt->page_shift, gpa))
 			return H_TOO_HARD;
 
-		if (kvmppc_gpa_to_ua(stt->kvm, tce & ~(TCE_PCI_READ | TCE_PCI_WRITE),
-				&ua, NULL))
+		if (kvmppc_tce_to_ua(stt->kvm, tce, &ua, NULL))
 			return H_TOO_HARD;
 
 		list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
@@ -552,8 +551,7 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 	idx = srcu_read_lock(&vcpu->kvm->srcu);
 
-	if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm,
-			tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL)) {
+	if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) {
 		ret = H_PARAMETER;
 		goto unlock_exit;
 	}
@@ -614,7 +612,7 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 		return ret;
 
 	idx = srcu_read_lock(&vcpu->kvm->srcu);
-	if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, NULL)) {
+	if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL)) {
 		ret = H_TOO_HARD;
 		goto unlock_exit;
 	}
@@ -649,9 +647,7 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 		}
 		tce = be64_to_cpu(tce);
 
-		if (kvmppc_gpa_to_ua(vcpu->kvm,
-				tce & ~(TCE_PCI_READ | TCE_PCI_WRITE),
-				&ua, NULL))
+		if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
 			return H_PARAMETER;
 
 		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -111,8 +111,7 @@ static long kvmppc_rm_tce_validate(struct kvmppc_spapr_tce_table *stt,
 		if (iommu_tce_check_gpa(stt->page_shift, gpa))
 			return H_PARAMETER;
 
-		if (kvmppc_gpa_to_ua(stt->kvm, tce & ~(TCE_PCI_READ | TCE_PCI_WRITE),
-				&ua, NULL))
+		if (kvmppc_tce_to_ua(stt->kvm, tce, &ua, NULL))
 			return H_TOO_HARD;
 
 		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
@@ -182,10 +181,10 @@ void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
 }
 EXPORT_SYMBOL_GPL(kvmppc_tce_put);
 
-long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
+long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce,
 		unsigned long *ua, unsigned long **prmap)
 {
-	unsigned long gfn = gpa >> PAGE_SHIFT;
+	unsigned long gfn = tce >> PAGE_SHIFT;
 	struct kvm_memory_slot *memslot;
 
 	memslot = search_memslots(kvm_memslots(kvm), gfn);
@@ -193,7 +192,7 @@ long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
 		return -EINVAL;
 
 	*ua = __gfn_to_hva_memslot(memslot, gfn) |
-		(gpa & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));
+		(tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));
 
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	if (prmap)
@@ -202,7 +201,7 @@ long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(kvmppc_gpa_to_ua);
+EXPORT_SYMBOL_GPL(kvmppc_tce_to_ua);
 
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 static long iommu_tce_xchg_rm(struct mm_struct *mm, struct iommu_table *tbl,
@@ -391,8 +390,7 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 		return ret;
 
 	dir = iommu_tce_direction(tce);
-	if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm,
-			tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL))
+	if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
 		return H_PARAMETER;
 
 	entry = ioba >> stt->page_shift;
@@ -494,7 +492,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 		 */
 		struct mm_iommu_table_group_mem_t *mem;
 
-		if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, NULL))
+		if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL))
 			return H_TOO_HARD;
 
 		mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
@@ -510,7 +508,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 		 * We do not require memory to be preregistered in this case
 		 * so lock rmap and do __find_linux_pte_or_hugepte().
 		 */
-		if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
+		if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
 			return H_TOO_HARD;
 
 		rmap = (void *) vmalloc_to_phys(rmap);
@@ -544,9 +542,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 		unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);
 
 		ua = 0;
-		if (kvmppc_gpa_to_ua(vcpu->kvm,
-				tce & ~(TCE_PCI_READ | TCE_PCI_WRITE),
-				&ua, NULL))
+		if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
 			return H_PARAMETER;
 
 		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
......
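
For context, the (dir != DMA_NONE) checks in the put_tce hunks above derive
the DMA direction from the same two permission bits via iommu_tce_direction()
in arch/powerpc/kernel/iommu.c. A sketch of that mapping, reproduced from
memory, so treat the exact body as an approximation of the upstream helper:

#include <linux/dma-direction.h>
#include <asm/tce.h>

/* Sketch: how a TCE's permission bits select the DMA direction. */
enum dma_data_direction iommu_tce_direction(unsigned long tce)
{
	if ((tce & TCE_PCI_READ) && (tce & TCE_PCI_WRITE))
		return DMA_BIDIRECTIONAL;	/* device may read and write */
	else if (tce & TCE_PCI_READ)
		return DMA_TO_DEVICE;		/* device reads guest memory */
	else if (tce & TCE_PCI_WRITE)
		return DMA_FROM_DEVICE;		/* device writes guest memory */
	else
		return DMA_NONE;		/* no access: clearing the entry */
}

A TCE with neither bit set thus maps to DMA_NONE, which is why the put_tce
paths skip the kvmppc_tce_to_ua() translation in that case.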