Commit 8ae0e970 authored by Jia He, committed by Christoph Hellwig

dma-mapping: move dma_addressing_limited() out of line

This patch moves dma_addressing_limited() out of line, serving as a
preliminary step to prevent the introduction of a new publicly accessible
low-level helper when validating whether all system RAM is mapped within
the DMA mapping range.
Suggested-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jia He <justin.he@arm.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent a5e3b127
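
The gain from moving the body out of line is that kernel/dma/mapping.c can later extend the check with helpers that stay internal to the DMA core rather than appearing in the public header. A rough sketch of that pattern follows (illustrative only; the internal helper named below is a hypothetical placeholder, not something added by this patch):

/*
 * Illustrative sketch, not part of this commit.  The public header
 * keeps only the prototype:
 *
 *     bool dma_addressing_limited(struct device *dev);
 *
 * while the out-of-line body in kernel/dma/mapping.c may consult
 * DMA-core-internal state.  dma_core_all_ram_mapped() below is a
 * hypothetical placeholder for a future "is all system RAM within
 * the mapping range" check that never needs to be exposed in the
 * public header.
 */
static bool dma_core_all_ram_mapped(struct device *dev);        /* hypothetical */

bool dma_addressing_limited(struct device *dev)
{
        if (min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
                        dma_get_required_mask(dev))
                return true;
        return !dma_core_all_ram_mapped(dev);        /* hypothetical follow-up */
}
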
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -144,6 +144,7 @@ bool dma_pci_p2pdma_supported(struct device *dev);
 int dma_set_mask(struct device *dev, u64 mask);
 int dma_set_coherent_mask(struct device *dev, u64 mask);
 u64 dma_get_required_mask(struct device *dev);
+bool dma_addressing_limited(struct device *dev);
 size_t dma_max_mapping_size(struct device *dev);
 size_t dma_opt_mapping_size(struct device *dev);
 bool dma_need_sync(struct device *dev, dma_addr_t dma_addr);
@@ -264,6 +265,10 @@ static inline u64 dma_get_required_mask(struct device *dev)
 {
         return 0;
 }
+static inline bool dma_addressing_limited(struct device *dev)
+{
+        return false;
+}
 static inline size_t dma_max_mapping_size(struct device *dev)
 {
         return 0;
@@ -465,20 +470,6 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
         return dma_set_mask_and_coherent(dev, mask);
 }
 
-/**
- * dma_addressing_limited - return if the device is addressing limited
- * @dev: device to check
- *
- * Return %true if the devices DMA mask is too small to address all memory in
- * the system, else %false. Lack of addressing bits is the prime reason for
- * bounce buffering, but might not be the only one.
- */
-static inline bool dma_addressing_limited(struct device *dev)
-{
-        return min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
-                        dma_get_required_mask(dev);
-}
-
 static inline unsigned int dma_get_max_seg_size(struct device *dev)
 {
         if (dev->dma_parms && dev->dma_parms->max_segment_size)
...
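
The removed inline body (and its out-of-line replacement below) boils down to one comparison: the effective mask the device can use, min_not_zero(dma_get_mask(dev), dev->bus_dma_limit), against the mask the platform actually needs, dma_get_required_mask(dev). A standalone userspace sketch with hypothetical values makes the arithmetic concrete (min_not_zero() is a kernel macro; it is open-coded here):

#include <stdint.h>
#include <stdio.h>

/* Open-coded equivalent of the kernel's min_not_zero(): the smaller
 * of the two values, treating 0 as "not set". */
static uint64_t min_not_zero(uint64_t a, uint64_t b)
{
        if (!a)
                return b;
        if (!b)
                return a;
        return a < b ? a : b;
}

int main(void)
{
        /* Hypothetical example: a 32-bit-capable device with no extra
         * bus limit, on a machine whose RAM needs 36 address bits. */
        uint64_t dma_mask      = 0xffffffffULL;   /* DMA_BIT_MASK(32) */
        uint64_t bus_dma_limit = 0;               /* no bus restriction */
        uint64_t required_mask = 0xfffffffffULL;  /* RAM above 4 GiB */

        int limited = min_not_zero(dma_mask, bus_dma_limit) < required_mask;

        printf("dma_addressing_limited() would return %s\n",
               limited ? "true" : "false");
        return 0;
}

With these values the effective mask is 0xffffffff, which is smaller than the 36-bit required mask, so the device counts as addressing limited.
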
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -793,6 +793,21 @@ int dma_set_coherent_mask(struct device *dev, u64 mask)
 }
 EXPORT_SYMBOL(dma_set_coherent_mask);
 
+/**
+ * dma_addressing_limited - return if the device is addressing limited
+ * @dev: device to check
+ *
+ * Return %true if the devices DMA mask is too small to address all memory in
+ * the system, else %false. Lack of addressing bits is the prime reason for
+ * bounce buffering, but might not be the only one.
+ */
+bool dma_addressing_limited(struct device *dev)
+{
+        return min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
+                        dma_get_required_mask(dev);
+}
+EXPORT_SYMBOL_GPL(dma_addressing_limited);
+
 size_t dma_max_mapping_size(struct device *dev)
 {
         const struct dma_map_ops *ops = get_dma_ops(dev);
...
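
With the definition exported via EXPORT_SYMBOL_GPL, modular GPL drivers can keep calling the helper exactly as before. A hypothetical probe-path sketch (foo_probe() and foo_alloc_bounce_pool() are made-up names, not kernel APIs) of the pattern the kernel-doc describes, falling back to bounce buffering when the device cannot reach all of system RAM:

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Hypothetical driver snippet -- illustration only. */
static int foo_probe(struct device *dev)
{
        int ret;

        /* The hardware can only generate 32-bit DMA addresses. */
        ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
        if (ret)
                return ret;

        if (dma_addressing_limited(dev)) {
                /* Not all system RAM is reachable; reserve a
                 * driver-managed bounce pool (made-up helper). */
                return foo_alloc_bounce_pool(dev);
        }

        return 0;
}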