Commit e14cd953 authored by Thomas Hellstrom

drm/vmwgfx: Fix a couple of compile / sparse warnings and errors

Fixes
 *) an implicit function declaration on mips,
 *) a defined-but-not-used label on !CONFIG_INTEL_IOMMU builds,
 *) hopefully a couple of sparse warnings where we implicitly cast
    integer to __le32 and vice versa.
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Brian Paul <brianp@vmware.com>
parent ea029c28
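The first hunk below implements fix #1: the get_dma_ops() call moves under CONFIG_X86, because an architecture that never declares the function (mips here) turns the call into an implicit function declaration. A minimal sketch of that guarding pattern, using a hypothetical CONFIG_FOO option and foo_helper() routine rather than the patch's real symbols:

#ifdef CONFIG_FOO
#include <foo/helper.h>	/* hypothetical header that declares foo_helper() */
#endif

static void setup(void)
{
#ifdef CONFIG_FOO
	/* Compiled only where a prototype is in scope; elsewhere the
	 * call would be an implicit function declaration. */
	foo_helper();
#endif
}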
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -453,12 +453,13 @@ static void vmw_get_initial_size(struct vmw_private *dev_priv)
  */
 static int vmw_dma_select_mode(struct vmw_private *dev_priv)
 {
-	const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);
 	static const char *names[vmw_dma_map_max] = {
 		[vmw_dma_phys] = "Using physical TTM page addresses.",
 		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
 		[vmw_dma_map_populate] = "Keeping DMA mappings.",
 		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
+#ifdef CONFIG_X86
+	const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);
 
 #ifdef CONFIG_INTEL_IOMMU
 	if (intel_iommu_enabled) {
@@ -482,7 +483,9 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
 	dev_priv->map_mode = vmw_dma_map_populate;
 #endif
 
+#ifdef CONFIG_INTEL_IOMMU
 out_fixup:
+#endif
 	if (dev_priv->map_mode == vmw_dma_map_populate &&
 	    vmw_restrict_iommu)
 		dev_priv->map_mode = vmw_dma_map_bind;
@@ -498,6 +501,10 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
 		return -EINVAL;
 #endif
 
+#else /* CONFIG_X86 */
+	dev_priv->map_mode = vmw_dma_map_populate;
+#endif /* CONFIG_X86 */
+
 	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
 
 	return 0;
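Fix #2 is the out_fixup guard in the hunk above: with CONFIG_INTEL_IOMMU disabled, the only goto that targets the label is compiled out, and gcc warns that the label is defined but not used. A standalone sketch of the same pattern, assuming a hypothetical CONFIG_FOO option and foo_probe() helper (not the driver's real symbols):

enum mode { MODE_DEFAULT, MODE_FOO, MODE_RESTRICTED };

#ifdef CONFIG_FOO
extern int foo_probe(void);	/* hypothetical, only available when CONFIG_FOO=y */
#endif

static enum mode select_mode(int restricted)
{
	enum mode mode;

#ifdef CONFIG_FOO
	if (foo_probe()) {
		mode = MODE_FOO;
		goto out_fixup;
	}
#endif
	mode = MODE_DEFAULT;

	/* The label is guarded by the same condition as its only goto;
	 * without the guard, !CONFIG_FOO builds warn
	 * "label 'out_fixup' defined but not used". */
#ifdef CONFIG_FOO
out_fixup:
#endif
	if (restricted)
		mode = MODE_RESTRICTED;
	return mode;
}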
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -145,7 +145,9 @@ static void vmw_gmr_free_descriptors(struct device *dev, dma_addr_t desc_dma,
 		}
 
 		page_virtual = kmap_atomic(page);
-		desc_dma = page_virtual[desc_per_page].ppn << PAGE_SHIFT;
+		desc_dma = (dma_addr_t)
+			le32_to_cpu(page_virtual[desc_per_page].ppn) <<
+			PAGE_SHIFT;
 		kunmap_atomic(page_virtual);
 		__free_page(page);
 	}
@@ -217,7 +219,8 @@ static int vmw_gmr_build_descriptors(struct device *dev,
 	desc_dma = 0;
 	list_for_each_entry_reverse(page, desc_pages, lru) {
 		page_virtual = kmap_atomic(page);
-		page_virtual[desc_per_page].ppn = desc_dma >> PAGE_SHIFT;
+		page_virtual[desc_per_page].ppn = cpu_to_le32
+			(desc_dma >> PAGE_SHIFT);
 		kunmap_atomic(page_virtual);
 		desc_dma = dma_map_page(dev, page, 0, PAGE_SIZE,
 					DMA_TO_DEVICE);
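The vmwgfx_gmr.c hunks are fix #3: the descriptor's ppn field is interpreted by the device as little-endian, so it is typed __le32 and must be converted with cpu_to_le32()/le32_to_cpu() at each access; sparse's endianness checking flags the previous implicit casts. Note the read side also gains a (dma_addr_t) cast so the page number is widened before the PAGE_SHIFT shift rather than after. A minimal kernel-style sketch of the annotation pattern, built around a hypothetical struct gmr_desc:

#include <linux/types.h>	/* __le32, u32, dma_addr_t */
#include <asm/byteorder.h>	/* cpu_to_le32(), le32_to_cpu() */
#include <asm/page.h>		/* PAGE_SHIFT */

/* Hypothetical descriptor; the device always reads ppn as little-endian. */
struct gmr_desc {
	__le32 ppn;
	__le32 num_pages;
};

static void gmr_desc_set_ppn(struct gmr_desc *d, u32 ppn)
{
	/* A plain "d->ppn = ppn;" makes sparse warn about an
	 * assignment between different base types. */
	d->ppn = cpu_to_le32(ppn);
}

static dma_addr_t gmr_desc_addr(const struct gmr_desc *d)
{
	/* Convert to CPU order, then widen to dma_addr_t *before*
	 * shifting, so bits above 32 are not truncated. */
	return (dma_addr_t)le32_to_cpu(d->ppn) << PAGE_SHIFT;
}

On little-endian machines the conversions compile to nothing; they exist so the types document byte order and sparse can check it.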