Commit 4de354ec authored by Lu Baolu, committed by Joerg Roedel

iommu/vt-d: Delegate the identity domain to upper layer

This allows the iommu generic layer to allocate an identity domain and
attach it to a device. Hence, the identity domain is delegated to the
upper layer. As a side effect, iommu_identity_mapping can no longer be
used to check for the existence of identity domains.
Signed-off-by: James Sewart <jamessewart@arista.com>
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent f273a453
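
Editorial context, not part of the patch: the "upper layer" referred to
above is the generic IOMMU core in drivers/iommu/iommu.c. A minimal
sketch of what that layer can do once the driver's ->domain_alloc()
accepts IOMMU_DOMAIN_IDENTITY, assuming the ops->domain_alloc(unsigned
type) signature of this era and simplifying __iommu_domain_alloc()
(error paths and pgsize_bitmap setup omitted; sketch_ name is ours):

/*
 * Sketch only: roughly how the generic layer obtains an identity
 * domain now that the driver handles the IOMMU_DOMAIN_IDENTITY type.
 */
#include <linux/iommu.h>

static struct iommu_domain *sketch_alloc_identity(struct bus_type *bus)
{
        struct iommu_domain *domain;

        if (!bus || !bus->iommu_ops || !bus->iommu_ops->domain_alloc)
                return NULL;

        /* For VT-d this now returns &si_domain->domain. */
        domain = bus->iommu_ops->domain_alloc(IOMMU_DOMAIN_IDENTITY);
        if (!domain)
                return NULL;

        domain->ops  = bus->iommu_ops;
        domain->type = IOMMU_DOMAIN_IDENTITY;

        return domain;
}
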
@@ -350,6 +350,7 @@ static void domain_context_clear(struct intel_iommu *iommu,
                                  struct device *dev);
 static int domain_detach_iommu(struct dmar_domain *domain,
                                struct intel_iommu *iommu);
+static bool device_is_rmrr_locked(struct device *dev);
 
 #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
 int dmar_disabled = 0;
@@ -2808,7 +2809,9 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width);
 
 static int __init si_domain_init(int hw)
 {
-        int nid, ret;
+        struct dmar_rmrr_unit *rmrr;
+        struct device *dev;
+        int i, nid, ret;
 
         si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
         if (!si_domain)
@@ -2819,8 +2822,6 @@ static int __init si_domain_init(int hw)
                 return -EFAULT;
         }
 
-        pr_debug("Identity mapping domain allocated\n");
-
         if (hw)
                 return 0;
 
@@ -2836,6 +2837,31 @@ static int __init si_domain_init(int hw)
                 }
         }
 
+        /*
+         * Normally we use DMA domains for devices which have RMRRs. But we
+         * loosen this requirement for graphics and USB devices. Identity-map
+         * the RMRRs for graphics and USB devices so that they can use the
+         * si_domain.
+         */
+        for_each_rmrr_units(rmrr) {
+                for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
+                                          i, dev) {
+                        unsigned long long start = rmrr->base_address;
+                        unsigned long long end = rmrr->end_address;
+
+                        if (device_is_rmrr_locked(dev))
+                                continue;
+
+                        if (WARN_ON(end < start ||
+                                    end >> agaw_to_width(si_domain->agaw)))
+                                continue;
+
+                        ret = iommu_domain_identity_map(si_domain, start, end);
+                        if (ret)
+                                return ret;
+                }
+        }
+
         return 0;
 }
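
For reference, the helper behind the new forward declaration already
exists further down in intel-iommu.c; a sketch of it as it stands
around this series (quoted for context, not changed by this patch)
shows why USB and graphics devices are allowed into si_domain despite
having RMRRs:

/* Devices with RMRRs stay locked to a DMA domain, except USB/GFX. */
static bool device_is_rmrr_locked(struct device *dev)
{
        if (!device_has_rmrr(dev))
                return false;

        if (dev_is_pci(dev)) {
                struct pci_dev *pdev = to_pci_dev(dev);

                if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
                        return false;
        }

        return true;
}
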
@@ -2843,9 +2869,6 @@ static int identity_mapping(struct device *dev)
 {
         struct device_domain_info *info;
 
-        if (likely(!iommu_identity_mapping))
-                return 0;
-
         info = dev->archdata.iommu;
         if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
                 return (info->domain == si_domain);
@@ -3431,11 +3454,9 @@ static int __init init_dmars(void)
 
         check_tylersburg_isoch();
 
-        if (iommu_identity_mapping) {
-                ret = si_domain_init(hw_pass_through);
-                if (ret)
-                        goto free_iommu;
-        }
+        ret = si_domain_init(hw_pass_through);
+        if (ret)
+                goto free_iommu;
 
         /*
@@ -3628,9 +3649,6 @@ static bool iommu_need_mapping(struct device *dev)
         if (iommu_dummy(dev))
                 return false;
 
-        if (!iommu_identity_mapping)
-                return true;
-
         found = identity_mapping(dev);
         if (found) {
                 if (iommu_should_identity_map(dev, 0))
@@ -5051,32 +5069,40 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
         struct dmar_domain *dmar_domain;
         struct iommu_domain *domain;
 
-        if (type != IOMMU_DOMAIN_UNMANAGED)
-                return NULL;
-
-        dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
-        if (!dmar_domain) {
-                pr_err("Can't allocate dmar_domain\n");
-                return NULL;
-        }
-        if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
-                pr_err("Domain initialization failed\n");
-                domain_exit(dmar_domain);
-                return NULL;
-        }
-        domain_update_iommu_cap(dmar_domain);
-
-        domain = &dmar_domain->domain;
-        domain->geometry.aperture_start = 0;
-        domain->geometry.aperture_end   = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
-        domain->geometry.force_aperture = true;
+        switch (type) {
+        case IOMMU_DOMAIN_UNMANAGED:
+                dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
+                if (!dmar_domain) {
+                        pr_err("Can't allocate dmar_domain\n");
+                        return NULL;
+                }
+                if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
+                        pr_err("Domain initialization failed\n");
+                        domain_exit(dmar_domain);
+                        return NULL;
+                }
+                domain_update_iommu_cap(dmar_domain);
+
+                domain = &dmar_domain->domain;
+                domain->geometry.aperture_start = 0;
+                domain->geometry.aperture_end   =
+                                __DOMAIN_MAX_ADDR(dmar_domain->gaw);
+                domain->geometry.force_aperture = true;
+
+                return domain;
+        case IOMMU_DOMAIN_IDENTITY:
+                return &si_domain->domain;
+        default:
+                return NULL;
+        }
 
-        return domain;
+        return NULL;
 }
 
 static void intel_iommu_domain_free(struct iommu_domain *domain)
 {
-        domain_exit(to_dmar_domain(domain));
+        if (domain != &si_domain->domain)
+                domain_exit(to_dmar_domain(domain));
 }
 
 /*
...
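
Why intel_iommu_domain_free() now checks for si_domain: the identity
domain is a single static object shared by every identity-mapped
device, so it must survive any one user going away. A hedged sketch of
the hazard the guard prevents (the sketch_ caller name is hypothetical;
iommu_domain_free() is the real generic entry point):

/*
 * Hypothetical caller: the generic layer frees a group's default
 * domain when the group is released. If the default domain type is
 * IDENTITY, that domain is the shared si_domain, and running
 * domain_exit() on it would tear down mappings still in use by every
 * other identity-mapped device.
 */
static void sketch_release_default_domain(struct iommu_domain *domain)
{
        /* intel_iommu_domain_free() skips &si_domain->domain. */
        iommu_domain_free(domain);
}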