Commit ca1fc489 authored by Alexey Kardashevskiy, committed by Paul Mackerras

KVM: PPC: Book3S: Allow backing bigger guest IOMMU pages with smaller physical pages

At the moment we only support in the host the IOMMU page sizes which
the guest is aware of, which are 4KB/64KB/16MB. However, P9 does not
support 16MB IOMMU pages; 2MB and 1GB pages are supported instead. We
can still emulate bigger guest pages (for example 16MB) with smaller
host pages (4KB/64KB/2MB).

This allows the physical IOMMU pages to use a page size smaller than
or equal to the guest-visible IOMMU page size.
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
parent c6b61661
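
The core of the change is a guest-to-host fan-out: one guest TCE entry covering
a page of size 1 << stt->page_shift is backed by 1 << (stt->page_shift -
tbl->it_page_shift) consecutive entries in the hardware table, each advanced by
one hardware IOMMU page. Below is a minimal user-space sketch of that
arithmetic; the variable names and the printf harness are illustrative only and
mirror, rather than reuse, the kernel code in the diff.

	/*
	 * Illustrative sketch of the subpage fan-out done by the new
	 * kvmppc_tce_iommu_map()/_unmap() wrappers (example values only).
	 */
	#include <stdio.h>

	int main(void)
	{
		unsigned int guest_page_shift = 24;	/* 16MB guest IOMMU page */
		unsigned int host_page_shift  = 16;	/* 64KB host IOMMU page  */
		unsigned long host_page_size  = 1UL << host_page_shift;

		unsigned long entry = 5;		/* guest TCE index (example)   */
		unsigned long ua    = 0x40000000UL;	/* guest user address (example) */

		/* One guest entry fans out to this many consecutive host entries. */
		unsigned long subpages = 1UL << (guest_page_shift - host_page_shift);
		unsigned long io_entry = entry * subpages;

		printf("guest entry %lu -> host entries %lu..%lu (%lu subpages)\n",
				entry, io_entry, io_entry + subpages - 1, subpages);
		printf("first subpage maps ua 0x%lx, last maps ua 0x%lx\n",
				ua, ua + (subpages - 1) * host_page_size);
		return 0;
	}
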
@@ -176,14 +176,12 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
 		if (!tbltmp)
 			continue;
-		/*
-		 * Make sure hardware table parameters are exactly the same;
-		 * this is used in the TCE handlers where boundary checks
-		 * use only the first attached table.
-		 */
-		if ((tbltmp->it_page_shift == stt->page_shift) &&
-				(tbltmp->it_offset == stt->offset) &&
-				(tbltmp->it_size == stt->size)) {
+		/* Make sure hardware table parameters are compatible */
+		if ((tbltmp->it_page_shift <= stt->page_shift) &&
+				(tbltmp->it_offset << tbltmp->it_page_shift ==
+				 stt->offset << stt->page_shift) &&
+				(tbltmp->it_size << tbltmp->it_page_shift ==
+				 stt->size << stt->page_shift)) {
 			/*
 			 * Reference the table to avoid races with
 			 * add/remove DMA windows.
@@ -396,7 +394,7 @@ static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
 	return H_SUCCESS;
 }

-static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
+static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm,
 		struct iommu_table *tbl, unsigned long entry)
 {
 	enum dma_data_direction dir = DMA_NONE;
@@ -416,7 +414,24 @@ static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
 	return ret;
 }

-long kvmppc_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl,
+static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
+		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
+		unsigned long entry)
+{
+	unsigned long i, ret = H_SUCCESS;
+	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
+	unsigned long io_entry = entry * subpages;
+
+	for (i = 0; i < subpages; ++i) {
+		ret = kvmppc_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
+
+		if (ret != H_SUCCESS)
+			break;
+	}
+
+	return ret;
+}
+
+long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
 		unsigned long entry, unsigned long ua,
 		enum dma_data_direction dir)
 {
@@ -453,6 +468,27 @@ long kvmppc_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl,
 	return 0;
 }

+static long kvmppc_tce_iommu_map(struct kvm *kvm,
+		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
+		unsigned long entry, unsigned long ua,
+		enum dma_data_direction dir)
+{
+	unsigned long i, pgoff, ret = H_SUCCESS;
+	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
+	unsigned long io_entry = entry * subpages;
+
+	for (i = 0, pgoff = 0; i < subpages;
+			++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {
+
+		ret = kvmppc_tce_iommu_do_map(kvm, tbl,
+				io_entry + i, ua + pgoff, dir);
+
+		if (ret != H_SUCCESS)
+			break;
+	}
+
+	return ret;
+}
+
 long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 		unsigned long ioba, unsigned long tce)
 {
@@ -491,10 +527,10 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
 		if (dir == DMA_NONE)
-			ret = kvmppc_tce_iommu_unmap(vcpu->kvm,
+			ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
 					stit->tbl, entry);
 		else
-			ret = kvmppc_tce_iommu_map(vcpu->kvm, stit->tbl,
+			ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
 					entry, ua, dir);

 		if (ret == H_SUCCESS)
@@ -570,7 +606,7 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 			return H_PARAMETER;

 		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
-			ret = kvmppc_tce_iommu_map(vcpu->kvm,
+			ret = kvmppc_tce_iommu_map(vcpu->kvm, stt,
 					stit->tbl, entry + i, ua,
 					iommu_tce_direction(tce));
@@ -618,7 +654,7 @@ long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
 	unsigned long entry = ioba >> stt->page_shift;

 	for (i = 0; i < npages; ++i) {
-		ret = kvmppc_tce_iommu_unmap(vcpu->kvm,
+		ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
 				stit->tbl, entry + i);

 		if (ret == H_SUCCESS)
...
@@ -221,7 +221,7 @@ static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
 	return H_SUCCESS;
 }

-static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
+static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm,
 		struct iommu_table *tbl, unsigned long entry)
 {
 	enum dma_data_direction dir = DMA_NONE;
@@ -245,7 +245,24 @@ static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
 	return ret;
 }

-static long kvmppc_rm_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl,
+static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
+		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
+		unsigned long entry)
+{
+	unsigned long i, ret = H_SUCCESS;
+	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
+	unsigned long io_entry = entry * subpages;
+
+	for (i = 0; i < subpages; ++i) {
+		ret = kvmppc_rm_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
+
+		if (ret != H_SUCCESS)
+			break;
+	}
+
+	return ret;
+}
+
+static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
 		unsigned long entry, unsigned long ua,
 		enum dma_data_direction dir)
 {
@@ -290,6 +307,27 @@ static long kvmppc_rm_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl,
 	return 0;
 }

+static long kvmppc_rm_tce_iommu_map(struct kvm *kvm,
+		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
+		unsigned long entry, unsigned long ua,
+		enum dma_data_direction dir)
+{
+	unsigned long i, pgoff, ret = H_SUCCESS;
+	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
+	unsigned long io_entry = entry * subpages;
+
+	for (i = 0, pgoff = 0; i < subpages;
+			++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {
+
+		ret = kvmppc_rm_tce_iommu_do_map(kvm, tbl,
+				io_entry + i, ua + pgoff, dir);
+
+		if (ret != H_SUCCESS)
+			break;
+	}
+
+	return ret;
+}
+
 long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 		unsigned long ioba, unsigned long tce)
 {
@@ -327,10 +365,10 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
 		if (dir == DMA_NONE)
-			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm,
+			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
 					stit->tbl, entry);
 		else
-			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm,
+			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
 					stit->tbl, entry, ua, dir);

 		if (ret == H_SUCCESS)
@@ -477,7 +515,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 			return H_PARAMETER;

 		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
-			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm,
+			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
 					stit->tbl, entry + i, ua,
 					iommu_tce_direction(tce));
@@ -529,7 +567,7 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
 	unsigned long entry = ioba >> stt->page_shift;

 	for (i = 0; i < npages; ++i) {
-		ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm,
+		ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
 				stit->tbl, entry + i);

 		if (ret == H_SUCCESS)
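
A note on the first hunk: since the hardware table may now use a smaller page
size than the guest-visible window, the attach-time check can no longer compare
it_offset and it_size directly (those are counted in pages of different sizes);
the new code compares the window offset and size in bytes instead. Below is a
hedged, cut-down sketch of that predicate; the struct and main() are stand-ins,
not the kernel's iommu_table or kvmppc_spapr_tce_table types.

	#include <stdbool.h>
	#include <stdio.h>

	/* Cut-down stand-in for a DMA window description (hypothetical type). */
	struct win {
		unsigned long page_shift;	/* page size, as a shift */
		unsigned long offset;		/* window offset, in pages */
		unsigned long size;		/* window size, in pages */
	};

	/* Same byte-granular comparison as the new attach-time check. */
	static bool windows_compatible(const struct win *hw, const struct win *guest)
	{
		return (hw->page_shift <= guest->page_shift) &&
			(hw->offset << hw->page_shift ==
			 guest->offset << guest->page_shift) &&
			(hw->size << hw->page_shift ==
			 guest->size << guest->page_shift);
	}

	int main(void)
	{
		/* 1GB window of 16MB guest pages backed by 64KB hardware pages */
		struct win guest = { .page_shift = 24, .offset = 0, .size = 64 };
		struct win hw    = { .page_shift = 16, .offset = 0, .size = 64 << 8 };

		printf("compatible: %d\n", windows_compatible(&hw, &guest));
		return 0;
	}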