Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
L
linux
Project overview
Project overview
Details
Activity
Releases
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Issues
0
Issues
0
List
Boards
Labels
Milestones
Merge Requests
0
Merge Requests
0
Analytics
Analytics
Repository
Value Stream
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Create a new issue
Commits
Issue Boards
Open sidebar
Kirill Smelkov
linux
Commits
d53bff88
Commit
d53bff88
authored
Apr 26, 2019
by
Joerg Roedel
Browse files
Options
Browse Files
Download
Plain Diff
Merge branch 'api-features' into arm/smmu
parents
26ac2b6e
26b25a2b
Changes
2
Hide whitespace changes
Inline
Side-by-side
Showing
2 changed files
with
340 additions
and
0 deletions
+340
-0
drivers/iommu/iommu.c
drivers/iommu/iommu.c
+200
-0
include/linux/iommu.h
include/linux/iommu.h
+140
-0
No files found.
drivers/iommu/iommu.c
View file @
d53bff88
...
...
@@ -2039,3 +2039,203 @@ int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
return
0
;
}
EXPORT_SYMBOL_GPL
(
iommu_fwspec_add_ids
);
/*
 * Per device IOMMU features.
 */
bool iommu_dev_has_feature(struct device *dev, enum iommu_dev_features feat)
{
	const struct iommu_ops *bus_ops = dev->bus->iommu_ops;

	/* Without a driver-provided hook the feature cannot be reported. */
	if (!bus_ops || !bus_ops->dev_has_feat)
		return false;

	return bus_ops->dev_has_feat(dev, feat);
}
EXPORT_SYMBOL_GPL(iommu_dev_has_feature);
/* Enable an IOMMU-specific feature for @dev via the bus iommu driver. */
int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
{
	const struct iommu_ops *bus_ops = dev->bus->iommu_ops;

	/* No driver hook means the feature cannot be enabled. */
	if (!bus_ops || !bus_ops->dev_enable_feat)
		return -ENODEV;

	return bus_ops->dev_enable_feat(dev, feat);
}
EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);
/*
 * Device drivers must perform the necessary cleanups before calling this.
 * For instance, all aux-domains must be detached before disabling the
 * aux-domain feature; otherwise -EBUSY is returned.
 */
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
{
	const struct iommu_ops *bus_ops = dev->bus->iommu_ops;

	if (!bus_ops || !bus_ops->dev_disable_feat)
		return -EBUSY;

	return bus_ops->dev_disable_feat(dev, feat);
}
EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);
/* Check whether an IOMMU-specific feature is currently enabled for @dev. */
bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
{
	const struct iommu_ops *bus_ops = dev->bus->iommu_ops;

	/* A missing hook is treated as "not enabled". */
	if (!bus_ops || !bus_ops->dev_feat_enabled)
		return false;

	return bus_ops->dev_feat_enabled(dev, feat);
}
EXPORT_SYMBOL_GPL(iommu_dev_feature_enabled);
/*
* Aux-domain specific attach/detach.
*
* Only works if iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX) returns
* true. Also, as long as domains are attached to a device through this
* interface, any tries to call iommu_attach_device() should fail
* (iommu_detach_device() can't fail, so we fail when trying to re-attach).
* This should make us safe against a device being attached to a guest as a
* whole while there are still pasid users on it (aux and sva).
*/
int
iommu_aux_attach_device
(
struct
iommu_domain
*
domain
,
struct
device
*
dev
)
{
int
ret
=
-
ENODEV
;
if
(
domain
->
ops
->
aux_attach_dev
)
ret
=
domain
->
ops
->
aux_attach_dev
(
domain
,
dev
);
if
(
!
ret
)
trace_attach_device_to_domain
(
dev
);
return
ret
;
}
EXPORT_SYMBOL_GPL
(
iommu_aux_attach_device
);
void
iommu_aux_detach_device
(
struct
iommu_domain
*
domain
,
struct
device
*
dev
)
{
if
(
domain
->
ops
->
aux_detach_dev
)
{
domain
->
ops
->
aux_detach_dev
(
domain
,
dev
);
trace_detach_device_from_domain
(
dev
);
}
}
EXPORT_SYMBOL_GPL
(
iommu_aux_detach_device
);
int
iommu_aux_get_pasid
(
struct
iommu_domain
*
domain
,
struct
device
*
dev
)
{
int
ret
=
-
ENODEV
;
if
(
domain
->
ops
->
aux_get_pasid
)
ret
=
domain
->
ops
->
aux_get_pasid
(
domain
,
dev
);
return
ret
;
}
EXPORT_SYMBOL_GPL
(
iommu_aux_get_pasid
);
/**
 * iommu_sva_bind_device() - Bind a process address space to a device
 * @dev: the device
 * @mm: the mm to bind, caller must hold a reference to it
 * @drvdata: opaque data pointer stashed for the device driver
 *
 * Create a bond between device and address space, allowing the device to
 * access the mm using the returned PASID. If a bond already exists between
 * @device and @mm, it is returned and an additional reference is taken.
 * Caller must call iommu_sva_unbind_device() to release each reference.
 *
 * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
 * initialize the required SVA features.
 *
 * On error, returns an ERR_PTR value.
 */
struct iommu_sva *
iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_sva *bond = ERR_PTR(-EINVAL);
	struct iommu_group *group;

	if (!ops || !ops->sva_bind)
		return ERR_PTR(-ENODEV);

	group = iommu_group_get(dev);
	if (!group)
		return ERR_PTR(-ENODEV);

	/* Hold the group mutex so device count and domain stay stable. */
	mutex_lock(&group->mutex);

	/*
	 * To keep things simple, SVA currently doesn't support IOMMU groups
	 * with more than one device. Existing SVA-capable systems are not
	 * affected by the problems that required IOMMU groups (lack of ACS
	 * isolation, device ID aliasing and other hardware issues).
	 */
	if (iommu_group_device_count(group) == 1)
		bond = ops->sva_bind(dev, mm, drvdata);

	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return bond;
}
EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
/**
 * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device
 * @handle: the handle returned by iommu_sva_bind_device()
 *
 * Put reference to a bond between device and address space. The device should
 * not be issuing any more transaction for this PASID. All outstanding page
 * requests for this PASID must have been flushed to the IOMMU.
 */
void iommu_sva_unbind_device(struct iommu_sva *handle)
{
	struct iommu_group *group;
	struct device *dev = handle->dev;
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	/* Nothing to undo if the driver never provided an unbind hook. */
	if (!ops || !ops->sva_unbind)
		return;

	group = iommu_group_get(dev);
	if (!group)
		return;

	/* Serialize against concurrent bind/unbind on the same group. */
	mutex_lock(&group->mutex);
	ops->sva_unbind(handle);
	mutex_unlock(&group->mutex);

	iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);
/*
 * Install driver callbacks on an SVA handle. Fails with -EEXIST if a
 * different ops table was already installed; re-setting the same table
 * is allowed.
 */
int iommu_sva_set_ops(struct iommu_sva *handle,
		      const struct iommu_sva_ops *sva_ops)
{
	const struct iommu_sva_ops *cur = handle->ops;

	if (cur && cur != sva_ops)
		return -EEXIST;

	handle->ops = sva_ops;
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_sva_set_ops);
/* Return the PASID of an SVA bond, or IOMMU_PASID_INVALID if unsupported. */
int iommu_sva_get_pasid(struct iommu_sva *handle)
{
	const struct iommu_ops *ops = handle->dev->bus->iommu_ops;

	if (ops && ops->sva_get_pasid)
		return ops->sva_get_pasid(handle);

	return IOMMU_PASID_INVALID;
}
EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
include/linux/iommu.h
View file @
d53bff88
...
...
@@ -48,6 +48,7 @@ struct bus_type;
struct
device
;
struct
iommu_domain
;
struct
notifier_block
;
struct
iommu_sva
;
/* iommu fault flags */
#define IOMMU_FAULT_READ 0x0
...
...
@@ -55,6 +56,8 @@ struct notifier_block;
typedef
int
(
*
iommu_fault_handler_t
)(
struct
iommu_domain
*
,
struct
device
*
,
unsigned
long
,
int
,
void
*
);
typedef
int
(
*
iommu_mm_exit_handler_t
)(
struct
device
*
dev
,
struct
iommu_sva
*
,
void
*
);
struct
iommu_domain_geometry
{
dma_addr_t
aperture_start
;
/* First address that can be mapped */
...
...
@@ -156,6 +159,33 @@ struct iommu_resv_region {
enum
iommu_resv_type
type
;
};
/* Per device IOMMU features */
enum iommu_dev_features {
	IOMMU_DEV_FEAT_AUX,	/* Aux-domain feature */
	IOMMU_DEV_FEAT_SVA,	/* Shared Virtual Addresses */
};

/* Sentinel PASID value returned when no valid PASID exists. */
#define IOMMU_PASID_INVALID	(-1U)

/**
 * struct iommu_sva_ops - device driver callbacks for an SVA context
 *
 * @mm_exit: called when the mm is about to be torn down by exit_mmap. After
 *           @mm_exit returns, the device must not issue any more transaction
 *           with the PASID given as argument.
 *
 *           The @mm_exit handler is allowed to sleep. Be careful about the
 *           locks taken in @mm_exit, because they might lead to deadlocks if
 *           they are also held when dropping references to the mm. Consider the
 *           following call chain:
 *           mutex_lock(A); mmput(mm) -> exit_mm() -> @mm_exit() -> mutex_lock(A)
 *           Using mmput_async() prevents this scenario.
 *
 */
struct iommu_sva_ops {
	iommu_mm_exit_handler_t mm_exit;
};
#ifdef CONFIG_IOMMU_API
/**
...
...
@@ -186,6 +216,14 @@ struct iommu_resv_region {
* @of_xlate: add OF master IDs to iommu grouping
* @is_attach_deferred: Check if domain attach should be deferred from iommu
* driver init to device driver init (default no)
* @dev_has/enable/disable_feat: per device entries to check/enable/disable
* iommu specific features.
* @dev_feat_enabled: check enabled feature
* @aux_attach/detach_dev: aux-domain specific attach/detach entries.
* @aux_get_pasid: get the pasid given an aux-domain
* @sva_bind: Bind process address space to device
* @sva_unbind: Unbind process address space from device
* @sva_get_pasid: Get PASID associated to a SVA handle
* @pgsize_bitmap: bitmap of all possible supported page sizes
*/
struct
iommu_ops
{
...
...
@@ -230,6 +268,22 @@ struct iommu_ops {
	/* Add OF master IDs to iommu grouping. */
	int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
	/* Check if domain attach should be deferred to device driver init. */
	bool (*is_attach_deferred)(struct iommu_domain *domain, struct device *dev);

	/* Per device IOMMU features */
	bool (*dev_has_feat)(struct device *dev, enum iommu_dev_features f);
	bool (*dev_feat_enabled)(struct device *dev, enum iommu_dev_features f);
	int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f);
	int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f);

	/* Aux-domain specific attach/detach entries */
	int (*aux_attach_dev)(struct iommu_domain *domain, struct device *dev);
	void (*aux_detach_dev)(struct iommu_domain *domain, struct device *dev);
	int (*aux_get_pasid)(struct iommu_domain *domain, struct device *dev);

	/* SVA: bind/unbind a process address space and query its PASID. */
	struct iommu_sva *(*sva_bind)(struct device *dev, struct mm_struct *mm,
				      void *drvdata);
	void (*sva_unbind)(struct iommu_sva *handle);
	int (*sva_get_pasid)(struct iommu_sva *handle);

	/* Bitmap of all possible supported page sizes. */
	unsigned long pgsize_bitmap;
};
...
...
@@ -400,6 +454,14 @@ struct iommu_fwspec {
/* ATS is supported */
#define IOMMU_FWSPEC_PCI_RC_ATS (1 << 0)
/**
* struct iommu_sva - handle to a device-mm bond
*/
struct
iommu_sva
{
struct
device
*
dev
;
const
struct
iommu_sva_ops
*
ops
;
};
int
iommu_fwspec_init
(
struct
device
*
dev
,
struct
fwnode_handle
*
iommu_fwnode
,
const
struct
iommu_ops
*
ops
);
void
iommu_fwspec_free
(
struct
device
*
dev
);
...
...
@@ -420,6 +482,22 @@ static inline void dev_iommu_fwspec_set(struct device *dev,
int iommu_probe_device(struct device *dev);
void iommu_release_device(struct device *dev);

/* Per device IOMMU feature check/enable/disable (see iommu.c). */
bool iommu_dev_has_feature(struct device *dev, enum iommu_dev_features f);
int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f);
bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features f);

/* Aux-domain specific attach/detach and PASID lookup. */
int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev);
void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev);
int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev);

/* SVA: bind/unbind a process address space to/from a device. */
struct iommu_sva *iommu_sva_bind_device(struct device *dev,
					struct mm_struct *mm,
					void *drvdata);
void iommu_sva_unbind_device(struct iommu_sva *handle);
int iommu_sva_set_ops(struct iommu_sva *handle,
		      const struct iommu_sva_ops *ops);
int iommu_sva_get_pasid(struct iommu_sva *handle);
#else
/* CONFIG_IOMMU_API */
/* Empty stub definition for !CONFIG_IOMMU_API builds. */
struct iommu_ops {};
...
...
@@ -704,6 +782,68 @@ const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
return
NULL
;
}
/* !CONFIG_IOMMU_API stub: no IOMMU, so no feature is ever present. */
static inline bool
iommu_dev_has_feature(struct device *dev, enum iommu_dev_features feat)
{
	return false;
}

/* !CONFIG_IOMMU_API stub: no IOMMU, so no feature is ever enabled. */
static inline bool
iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
{
	return false;
}
/* !CONFIG_IOMMU_API stub: enabling always fails with -ENODEV. */
static inline int
iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
{
	return -ENODEV;
}

/* !CONFIG_IOMMU_API stub: disabling always fails with -ENODEV. */
static inline int
iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
{
	return -ENODEV;
}
/* !CONFIG_IOMMU_API stub: aux-domain attach always fails. */
static inline int
iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
{
	return -ENODEV;
}

/* !CONFIG_IOMMU_API stub: nothing to detach. */
static inline void
iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
{
}

/* !CONFIG_IOMMU_API stub: no PASID available. */
static inline int
iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
{
	return -ENODEV;
}
/*
 * !CONFIG_IOMMU_API stub.
 * NOTE(review): returns NULL while the real implementation returns ERR_PTR
 * values on failure — callers checking only IS_ERR() would treat this as
 * success. Confirm this asymmetry is intended.
 */
static inline struct iommu_sva *
iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
{
	return NULL;
}

/* !CONFIG_IOMMU_API stub: nothing to unbind. */
static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
{
}

/* !CONFIG_IOMMU_API stub: there is no handle to install ops on. */
static inline int iommu_sva_set_ops(struct iommu_sva *handle,
				    const struct iommu_sva_ops *ops)
{
	return -EINVAL;
}

/* !CONFIG_IOMMU_API stub: no PASID without an IOMMU. */
static inline int iommu_sva_get_pasid(struct iommu_sva *handle)
{
	return IOMMU_PASID_INVALID;
}
#endif
/* CONFIG_IOMMU_API */
#ifdef CONFIG_IOMMU_DEBUGFS
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment