Commit 96ab4c70 authored by Daniel Vetter

Merge branch 'bdw-fixes' into backlight-rework

Merge the bdw changes into the backlight rework branch so that we can
adapt the new code for bdw, too. This is a bit of a mess, but doing it
any other way would have delayed the merging of the backlight
refactoring. Mea culpa.

As discussed with Jani on IRC, only do bdw-specific callbacks for the
set/get methods and bake the only other special case into the pch
enable function.

Conflicts:
	drivers/gpu/drm/i915/intel_panel.c

v2: Don't enable the PWM too early for bdw (Jani).

v3: Create new bdw_ functions for setup and enable - the rules change
sufficiently imo with the switch from controlling the PWM from the cpu
to controlling it completely from the pch to warrant this.

v4: Rip out unused pipe variable in bdw_enable_backlight (0-day
builder).

Tested-by: Ben Widawsky <ben@bwidawsk.net> (on bdw)
Reviewed-by: Jani Nikula <jani.nikula@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parents 565ee389 596cc11e
@@ -1329,7 +1329,7 @@ static u32 edid_get_quirks(struct edid *edid)
 }
 
 #define MODE_SIZE(m) ((m)->hdisplay * (m)->vdisplay)
-#define MODE_REFRESH_DIFF(m,r) (abs((m)->vrefresh - target_refresh))
+#define MODE_REFRESH_DIFF(c,t) (abs((c) - (t)))
 
 /**
  * edid_fixup_preferred - set preferred modes based on quirk list
@@ -1344,6 +1344,7 @@ static void edid_fixup_preferred(struct drm_connector *connector,
 {
         struct drm_display_mode *t, *cur_mode, *preferred_mode;
         int target_refresh = 0;
+        int cur_vrefresh, preferred_vrefresh;
 
         if (list_empty(&connector->probed_modes))
                 return;
@@ -1366,10 +1367,14 @@ static void edid_fixup_preferred(struct drm_connector *connector,
                 if (MODE_SIZE(cur_mode) > MODE_SIZE(preferred_mode))
                         preferred_mode = cur_mode;
 
+                cur_vrefresh = cur_mode->vrefresh ?
+                        cur_mode->vrefresh : drm_mode_vrefresh(cur_mode);
+                preferred_vrefresh = preferred_mode->vrefresh ?
+                        preferred_mode->vrefresh : drm_mode_vrefresh(preferred_mode);
                 /* At a given size, try to get closest to target refresh */
                 if ((MODE_SIZE(cur_mode) == MODE_SIZE(preferred_mode)) &&
-                    MODE_REFRESH_DIFF(cur_mode, target_refresh) <
-                    MODE_REFRESH_DIFF(preferred_mode, target_refresh)) {
+                    MODE_REFRESH_DIFF(cur_vrefresh, target_refresh) <
+                    MODE_REFRESH_DIFF(preferred_vrefresh, target_refresh)) {
                         preferred_mode = cur_mode;
                 }
         }
...
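A note on the hunk above: probed modes can carry vrefresh == 0, so the quirk
code now falls back to computing the refresh rate instead of comparing the raw
field. Below is a minimal userspace sketch of the same fallback pattern;
mode_vrefresh() is a simplified stand-in for drm_mode_vrefresh() (the real
helper also handles interlace, doublescan and vscan), and the timings are
invented:

    #include <stdio.h>
    #include <stdlib.h>

    struct mode { int vrefresh, clock, htotal, vtotal; };

    /* Simplified stand-in for drm_mode_vrefresh(): refresh from timings,
     * with clock in kHz. */
    static int mode_vrefresh(const struct mode *m)
    {
            return m->clock * 1000 / (m->htotal * m->vtotal);
    }

    #define MODE_REFRESH_DIFF(c, t) (abs((c) - (t)))

    int main(void)
    {
            /* vrefresh == 0 forces the computed fallback, as in the patch. */
            struct mode cur = { .vrefresh = 0, .clock = 148500,
                                .htotal = 2200, .vtotal = 1125 };
            int target_refresh = 60;
            int cur_vrefresh = cur.vrefresh ? cur.vrefresh : mode_vrefresh(&cur);

            printf("diff to target: %d\n",
                   MODE_REFRESH_DIFF(cur_vrefresh, target_refresh));
            return 0;
    }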
@@ -1755,8 +1755,13 @@ struct drm_i915_file_private {
 #define IS_MOBILE(dev)          (INTEL_INFO(dev)->is_mobile)
 #define IS_HSW_EARLY_SDV(dev)   (IS_HASWELL(dev) && \
                                  ((dev)->pdev->device & 0xFF00) == 0x0C00)
-#define IS_ULT(dev)             (IS_HASWELL(dev) && \
+#define IS_BDW_ULT(dev)         (IS_BROADWELL(dev) && \
+                                 (((dev)->pdev->device & 0xf) == 0x2 || \
+                                 ((dev)->pdev->device & 0xf) == 0x6 || \
+                                 ((dev)->pdev->device & 0xf) == 0xe))
+#define IS_HSW_ULT(dev)         (IS_HASWELL(dev) && \
                                  ((dev)->pdev->device & 0xFF00) == 0x0A00)
+#define IS_ULT(dev)             (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
 #define IS_HSW_GT3(dev)         (IS_HASWELL(dev) && \
                                  ((dev)->pdev->device & 0x00F0) == 0x0020)
 #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
...
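The hunk above splits the old IS_ULT() into HSW ULT (matched on the 0x0A00
device-ID range) and BDW ULT (matched on the low nibble of the device ID).
A small standalone sketch of the BDW nibble test; the sample IDs are
illustrative only, not an authoritative list of parts:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the IS_BDW_ULT() test from the hunk above: BDW ULT parts
     * have 0x2, 0x6 or 0xe in the low nibble of the PCI device ID. */
    static int is_bdw_ult(uint16_t device)
    {
            uint16_t nibble = device & 0xf;

            return nibble == 0x2 || nibble == 0x6 || nibble == 0xe;
    }

    int main(void)
    {
            const uint16_t ids[] = { 0x1616, 0x160a, 0x162e }; /* sample IDs */

            for (unsigned i = 0; i < sizeof(ids) / sizeof(ids[0]); i++)
                    printf("0x%04x -> %s\n", ids[i],
                           is_bdw_ult(ids[i]) ? "ULT" : "not ULT");
            return 0;
    }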
@@ -335,8 +335,8 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
                 kfree(ppgtt->gen8_pt_dma_addr[i]);
         }
 
-        __free_pages(ppgtt->gen8_pt_pages, ppgtt->num_pt_pages << PAGE_SHIFT);
-        __free_pages(ppgtt->pd_pages, ppgtt->num_pd_pages << PAGE_SHIFT);
+        __free_pages(ppgtt->gen8_pt_pages, get_order(ppgtt->num_pt_pages << PAGE_SHIFT));
+        __free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT));
 }
 
 /**
@@ -1239,6 +1239,11 @@ static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
         bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
         if (bdw_gmch_ctl)
                 bdw_gmch_ctl = 1 << bdw_gmch_ctl;
+        if (bdw_gmch_ctl > 4) {
+                WARN_ON(!i915_preliminary_hw_support);
+                return 4<<20;
+        }
+
         return bdw_gmch_ctl << 20;
 }
...
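Why the __free_pages() change in the first hunk is a fix: the function's
second argument is an allocation order (log2 of the page count), not a byte
count, so passing `num_pt_pages << PAGE_SHIFT` directly was wrong. A
self-contained sketch with a local reimplementation of get_order(), assuming
4 KiB pages:

    #include <stdio.h>

    #define PAGE_SHIFT 12 /* assume 4 KiB pages for this example */

    /* Local stand-in for the kernel's get_order(): smallest order such
     * that (1 << order) pages cover 'size' bytes. */
    static int get_order(unsigned long size)
    {
            int order = 0;

            size = (size - 1) >> PAGE_SHIFT;
            while (size) {
                    order++;
                    size >>= 1;
            }
            return order;
    }

    int main(void)
    {
            unsigned long num_pt_pages = 512; /* hypothetical page count */
            unsigned long bytes = num_pt_pages << PAGE_SHIFT;

            /* The buggy call passed 'bytes' (2 MiB here) where an order
             * was expected; the fixed call passes get_order(bytes) == 9. */
            printf("bytes = %lu, order = %d\n", bytes, get_order(bytes));
            return 0;
    }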
@@ -859,7 +859,9 @@ int intel_opregion_setup(struct drm_device *dev)
                 return -ENOTSUPP;
         }
 
+#ifdef CONFIG_ACPI
         INIT_WORK(&opregion->asle_work, asle_work);
+#endif
 
         base = acpi_os_ioremap(asls, OPREGION_SIZE);
         if (!base)
...
@@ -352,6 +352,14 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector,
         return val;
 }
 
+static u32 bdw_get_backlight(struct intel_connector *connector)
+{
+        struct drm_device *dev = connector->base.dev;
+        struct drm_i915_private *dev_priv = dev->dev_private;
+
+        return I915_READ(BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
+}
+
 static u32 pch_get_backlight(struct intel_connector *connector)
 {
         struct drm_device *dev = connector->base.dev;
@@ -414,6 +422,14 @@ static u32 intel_panel_get_backlight(struct intel_connector *connector)
         return val;
 }
 
+static void bdw_set_backlight(struct intel_connector *connector, u32 level)
+{
+        struct drm_device *dev = connector->base.dev;
+        struct drm_i915_private *dev_priv = dev->dev_private;
+        u32 val = I915_READ(BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+        I915_WRITE(BLC_PWM_PCH_CTL2, val | level);
+}
+
 static void pch_set_backlight(struct intel_connector *connector, u32 level)
 {
         struct drm_device *dev = connector->base.dev;
@@ -585,6 +601,38 @@ void intel_panel_disable_backlight(struct intel_connector *connector)
         spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
 }
 
+static void bdw_enable_backlight(struct intel_connector *connector)
+{
+        struct drm_device *dev = connector->base.dev;
+        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct intel_panel *panel = &connector->panel;
+        u32 pch_ctl1, pch_ctl2;
+
+        pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
+        if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
+                DRM_DEBUG_KMS("pch backlight already enabled\n");
+                pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
+                I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
+        }
+
+        pch_ctl2 = panel->backlight.max << 16;
+        I915_WRITE(BLC_PWM_PCH_CTL2, pch_ctl2);
+
+        pch_ctl1 = 0;
+        if (panel->backlight.active_low_pwm)
+                pch_ctl1 |= BLM_PCH_POLARITY;
+
+        /* BDW always uses the pch pwm controls. */
+        pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE;
+
+        I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
+        POSTING_READ(BLC_PWM_PCH_CTL1);
+        I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE);
+
+        /* This won't stick until the above enable. */
+        intel_panel_actually_set_backlight(connector, panel->backlight.level);
+}
+
 static void pch_enable_backlight(struct intel_connector *connector)
 {
         struct drm_device *dev = connector->base.dev;
@@ -626,6 +674,7 @@ static void pch_enable_backlight(struct intel_connector *connector)
         pch_ctl1 = 0;
         if (panel->backlight.active_low_pwm)
                 pch_ctl1 |= BLM_PCH_POLARITY;
+
         I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
         POSTING_READ(BLC_PWM_PCH_CTL1);
         I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE);
@@ -869,6 +918,30 @@ static void intel_backlight_device_unregister(struct intel_connector *connector)
  * XXX: Query mode clock or hardware clock and program PWM modulation frequency
  * appropriately when it's 0. Use VBT and/or sane defaults.
  */
+static int bdw_setup_backlight(struct intel_connector *connector)
+{
+        struct drm_device *dev = connector->base.dev;
+        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct intel_panel *panel = &connector->panel;
+        u32 pch_ctl1, pch_ctl2, val;
+
+        pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
+        panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
+
+        pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
+        panel->backlight.max = pch_ctl2 >> 16;
+        if (!panel->backlight.max)
+                return -ENODEV;
+
+        val = bdw_get_backlight(connector);
+        panel->backlight.level = intel_panel_compute_brightness(connector, val);
+
+        panel->backlight.enabled = (pch_ctl1 & BLM_PCH_PWM_ENABLE) &&
+                panel->backlight.level != 0;
+
+        return 0;
+}
+
 static int pch_setup_backlight(struct intel_connector *connector)
 {
         struct drm_device *dev = connector->base.dev;
@@ -1036,7 +1109,13 @@ void intel_panel_init_backlight_funcs(struct drm_device *dev)
 {
         struct drm_i915_private *dev_priv = dev->dev_private;
 
-        if (HAS_PCH_SPLIT(dev)) {
+        if (IS_BROADWELL(dev)) {
+                dev_priv->display.setup_backlight = bdw_setup_backlight;
+                dev_priv->display.enable_backlight = bdw_enable_backlight;
+                dev_priv->display.disable_backlight = pch_disable_backlight;
+                dev_priv->display.set_backlight = bdw_set_backlight;
+                dev_priv->display.get_backlight = bdw_get_backlight;
+        } else if (HAS_PCH_SPLIT(dev)) {
                 dev_priv->display.setup_backlight = pch_setup_backlight;
                 dev_priv->display.enable_backlight = pch_enable_backlight;
                 dev_priv->display.disable_backlight = pch_disable_backlight;
...
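How the bdw callbacks above fit together: setup reads the PWM period out of
the high word of BLC_PWM_PCH_CTL2, while set/get only ever touch the duty
cycle in the low word. A toy model of that packing, assuming
BACKLIGHT_DUTY_CYCLE_MASK covers the low 16 bits (which is what the `>> 16`
in bdw_setup_backlight implies); the register values are invented:

    #include <stdint.h>
    #include <stdio.h>

    #define BACKLIGHT_DUTY_CYCLE_MASK 0xffff /* assumed: low word */

    int main(void)
    {
            uint32_t max = 4648;   /* invented PWM period */
            uint32_t level = 1234; /* requested duty cycle */

            /* bdw_setup_backlight(): the period lives in the high word. */
            uint32_t pch_ctl2 = max << 16;

            /* bdw_set_backlight(): replace only the low word. */
            pch_ctl2 = (pch_ctl2 & ~BACKLIGHT_DUTY_CYCLE_MASK) | level;

            /* bdw_get_backlight(): read the low word back. */
            printf("max=%u level=%u\n", pch_ctl2 >> 16,
                   pch_ctl2 & BACKLIGHT_DUTY_CYCLE_MASK);
            return 0;
    }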
@@ -5684,6 +5684,7 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
 {
         struct drm_i915_private *dev_priv = dev->dev_private;
         bool is_enabled, enable_requested;
+        unsigned long irqflags;
         uint32_t tmp;
 
         tmp = I915_READ(HSW_PWR_WELL_DRIVER);
@@ -5701,9 +5702,24 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
                                       HSW_PWR_WELL_STATE_ENABLED), 20))
                                 DRM_ERROR("Timeout enabling power well\n");
                 }
+
+                if (IS_BROADWELL(dev)) {
+                        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+                        I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B),
+                                   dev_priv->de_irq_mask[PIPE_B]);
+                        I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B),
+                                   ~dev_priv->de_irq_mask[PIPE_B] |
+                                   GEN8_PIPE_VBLANK);
+                        I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C),
+                                   dev_priv->de_irq_mask[PIPE_C]);
+                        I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C),
+                                   ~dev_priv->de_irq_mask[PIPE_C] |
+                                   GEN8_PIPE_VBLANK);
+                        POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C));
+                        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+                }
         } else {
                 if (enable_requested) {
-                        unsigned long irqflags;
                         enum pipe p;
 
                         I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
...
@@ -965,6 +965,7 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
         } else if (IS_GEN6(ring->dev)) {
                 mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
         } else {
+                /* XXX: gen8 returns to sanity */
                 mmio = RING_HWS_PGA(ring->mmio_base);
         }
...
@@ -782,6 +782,7 @@ static int gen6_do_reset(struct drm_device *dev)
 int intel_gpu_reset(struct drm_device *dev)
 {
         switch (INTEL_INFO(dev)->gen) {
+        case 8:
         case 7:
         case 6: return gen6_do_reset(dev);
         case 5: return ironlake_do_reset(dev);
...
@@ -102,6 +102,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
         int retval = VM_FAULT_NOPAGE;
         struct ttm_mem_type_manager *man =
                 &bdev->man[bo->mem.mem_type];
+        struct vm_area_struct cvma;
 
         /*
          * Work around locking order reversal in fault / nopfn
@@ -164,26 +165,21 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
         }
 
         /*
-         * Strictly, we're not allowed to modify vma->vm_page_prot here,
-         * since the mmap_sem is only held in read mode. However, we
-         * modify only the caching bits of vma->vm_page_prot and
-         * consider those bits protected by
-         * the bo->mutex, as we should be the only writers.
-         * There shouldn't really be any readers of these bits except
-         * within vm_insert_mixed()? fork?
-         *
-         * TODO: Add a list of vmas to the bo, and change the
-         * vma->vm_page_prot when the object changes caching policy, with
-         * the correct locks held.
+         * Make a local vma copy to modify the page_prot member
+         * and vm_flags if necessary. The vma parameter is protected
+         * by mmap_sem in write mode.
          */
+        cvma = *vma;
+        cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);
+
         if (bo->mem.bus.is_iomem) {
-                vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
-                                                vma->vm_page_prot);
+                cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
+                                                cvma.vm_page_prot);
         } else {
                 ttm = bo->ttm;
-                vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
-                        vm_get_page_prot(vma->vm_flags) :
-                        ttm_io_prot(bo->mem.placement, vma->vm_page_prot);
+                if (!(bo->mem.placement & TTM_PL_FLAG_CACHED))
+                        cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
+                                                        cvma.vm_page_prot);
 
                 /* Allocate all page at once, most common usage */
                 if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
@@ -210,7 +206,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                         pfn = page_to_pfn(page);
                 }
 
-                ret = vm_insert_mixed(vma, address, pfn);
+                ret = vm_insert_mixed(&cvma, address, pfn);
                 /*
                  * Somebody beat us to this PTE or prefaulting to
                  * an already populated PTE, or prefaulting error.
...
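The rework above relies on vm_insert_mixed() only consuming the protection
bits of the vma it is handed, so a stack copy (cvma) is enough and the shared
vma is never written while mmap_sem is held for read. A toy model of that
copy-and-modify pattern; the struct and bit values are invented stand-ins,
not the real mm types:

    #include <stdio.h>

    struct toy_vma { unsigned long vm_flags, vm_page_prot; };

    /* Consumer that, like vm_insert_mixed(), only reads the prot bits. */
    static void insert_pfn(const struct toy_vma *vma)
    {
            printf("inserting with prot 0x%lx\n", vma->vm_page_prot);
    }

    int main(void)
    {
            struct toy_vma shared = { .vm_flags = 0x73, .vm_page_prot = 0x25 };

            /* The fault handler must not write the shared vma (mmap_sem
             * is held for read), so it works on a local copy instead. */
            struct toy_vma cvma = shared;

            cvma.vm_page_prot |= 0x8; /* e.g. change the caching bits */
            insert_pfn(&cvma);

            /* The shared vma is untouched. */
            printf("shared prot still 0x%lx\n", shared.vm_page_prot);
            return 0;
    }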
@@ -453,12 +453,13 @@ static void vmw_get_initial_size(struct vmw_private *dev_priv)
  */
 static int vmw_dma_select_mode(struct vmw_private *dev_priv)
 {
-        const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);
         static const char *names[vmw_dma_map_max] = {
                 [vmw_dma_phys] = "Using physical TTM page addresses.",
                 [vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
                 [vmw_dma_map_populate] = "Keeping DMA mappings.",
                 [vmw_dma_map_bind] = "Giving up DMA mappings early."};
+#ifdef CONFIG_X86
+        const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);
 
 #ifdef CONFIG_INTEL_IOMMU
         if (intel_iommu_enabled) {
@@ -500,6 +501,10 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
         return -EINVAL;
 #endif
 
+#else /* CONFIG_X86 */
+        dev_priv->map_mode = vmw_dma_map_populate;
+#endif /* CONFIG_X86 */
+
         DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
 
         return 0;
...
@@ -145,7 +145,9 @@ static void vmw_gmr_free_descriptors(struct device *dev, dma_addr_t desc_dma,
         }
 
         page_virtual = kmap_atomic(page);
-        desc_dma = page_virtual[desc_per_page].ppn << PAGE_SHIFT;
+        desc_dma = (dma_addr_t)
+                le32_to_cpu(page_virtual[desc_per_page].ppn) <<
+                PAGE_SHIFT;
         kunmap_atomic(page_virtual);
         __free_page(page);
@@ -217,7 +219,8 @@ static int vmw_gmr_build_descriptors(struct device *dev,
         desc_dma = 0;
         list_for_each_entry_reverse(page, desc_pages, lru) {
                 page_virtual = kmap_atomic(page);
-                page_virtual[desc_per_page].ppn = desc_dma >> PAGE_SHIFT;
+                page_virtual[desc_per_page].ppn = cpu_to_le32
+                        (desc_dma >> PAGE_SHIFT);
                 kunmap_atomic(page_virtual);
                 desc_dma = dma_map_page(dev, page, 0, PAGE_SIZE,
                                         DMA_TO_DEVICE);
...
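The hunks above do two things: store the descriptor page number explicitly
little-endian (cpu_to_le32 on write, le32_to_cpu on read) and widen to
dma_addr_t before shifting back up, so the result is correct on big-endian
hosts and for addresses past 32 bits. A round-trip sketch with local
stand-ins for the byte-order helpers (the kernel provides its own in its
byteorder headers); the address is invented:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for the kernel helpers: swap only on big-endian hosts
     * so the stored value is always little-endian. */
    static uint32_t my_cpu_to_le32(uint32_t v)
    {
    #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
            return __builtin_bswap32(v);
    #else
            return v;
    #endif
    }

    static uint32_t my_le32_to_cpu(uint32_t v)
    {
            return my_cpu_to_le32(v); /* the swap is its own inverse */
    }

    int main(void)
    {
            uint64_t desc_dma = 0x12345000; /* invented DMA address */
            uint32_t ppn = my_cpu_to_le32(desc_dma >> 12); /* LE page number */

            /* Widen before shifting back, as the (dma_addr_t) cast does,
             * so page numbers >= 2^20 don't overflow 32 bits. */
            uint64_t back = (uint64_t)my_le32_to_cpu(ppn) << 12;

            printf("0x%llx\n", (unsigned long long)back);
            return 0;
    }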
@@ -32,6 +32,8 @@
 #include <drm/drmP.h>
 #include "vmwgfx_resource_priv.h"
 
+#define VMW_RES_EVICT_ERR_COUNT 10
+
 struct vmw_user_dma_buffer {
         struct ttm_base_object base;
         struct vmw_dma_buffer dma;
@@ -1091,8 +1093,9 @@ vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
  * to a backup buffer.
  *
  * @res:            The resource to evict.
+ * @interruptible:  Whether to wait interruptible.
  */
-int vmw_resource_do_evict(struct vmw_resource *res)
+int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
 {
         struct ttm_validate_buffer val_buf;
         const struct vmw_res_func *func = res->func;
@@ -1102,7 +1105,8 @@ int vmw_resource_do_evict(struct vmw_resource *res)
         BUG_ON(!func->may_evict);
 
         val_buf.bo = NULL;
-        ret = vmw_resource_check_buffer(res, &ticket, true, &val_buf);
+        ret = vmw_resource_check_buffer(res, &ticket, interruptible,
+                                        &val_buf);
         if (unlikely(ret != 0))
                 return ret;
@@ -1141,6 +1145,7 @@ int vmw_resource_validate(struct vmw_resource *res)
         struct vmw_private *dev_priv = res->dev_priv;
         struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
         struct ttm_validate_buffer val_buf;
+        unsigned err_count = 0;
 
         if (likely(!res->func->may_evict))
                 return 0;
@@ -1155,7 +1160,7 @@ int vmw_resource_validate(struct vmw_resource *res)
                 write_lock(&dev_priv->resource_lock);
                 if (list_empty(lru_list) || !res->func->may_evict) {
-                        DRM_ERROR("Out of device device id entries "
+                        DRM_ERROR("Out of device device resources "
                                   "for %s.\n", res->func->type_name);
                         ret = -EBUSY;
                         write_unlock(&dev_priv->resource_lock);
@@ -1168,7 +1173,19 @@ int vmw_resource_validate(struct vmw_resource *res)
                 list_del_init(&evict_res->lru_head);
                 write_unlock(&dev_priv->resource_lock);
 
-                vmw_resource_do_evict(evict_res);
+                ret = vmw_resource_do_evict(evict_res, true);
+                if (unlikely(ret != 0)) {
+                        write_lock(&dev_priv->resource_lock);
+                        list_add_tail(&evict_res->lru_head, lru_list);
+                        write_unlock(&dev_priv->resource_lock);
+
+                        if (ret == -ERESTARTSYS ||
+                            ++err_count > VMW_RES_EVICT_ERR_COUNT) {
+                                vmw_resource_unreference(&evict_res);
+                                goto out_no_validate;
+                        }
+                }
+
                 vmw_resource_unreference(&evict_res);
         } while (1);
@@ -1253,13 +1270,15 @@ bool vmw_resource_needs_backup(const struct vmw_resource *res)
  * @type:           The resource type to evict
  *
  * To avoid thrashing starvation or as part of the hibernation sequence,
- * evict all evictable resources of a specific type.
+ * try to evict all evictable resources of a specific type.
  */
 static void vmw_resource_evict_type(struct vmw_private *dev_priv,
                                     enum vmw_res_type type)
 {
         struct list_head *lru_list = &dev_priv->res_lru[type];
         struct vmw_resource *evict_res;
+        unsigned err_count = 0;
+        int ret;
 
         do {
                 write_lock(&dev_priv->resource_lock);
@@ -1272,7 +1291,18 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv,
                        lru_head));
                 list_del_init(&evict_res->lru_head);
                 write_unlock(&dev_priv->resource_lock);
-                vmw_resource_do_evict(evict_res);
+
+                ret = vmw_resource_do_evict(evict_res, false);
+                if (unlikely(ret != 0)) {
+                        write_lock(&dev_priv->resource_lock);
+                        list_add_tail(&evict_res->lru_head, lru_list);
+                        write_unlock(&dev_priv->resource_lock);
+                        if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
+                                vmw_resource_unreference(&evict_res);
+                                return;
+                        }
+                }
+
                 vmw_resource_unreference(&evict_res);
         } while (1);
...
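The changes above replace a fire-and-forget eviction call with a bounded
retry: a failed eviction puts the resource back on the LRU, and the loop
bails out after VMW_RES_EVICT_ERR_COUNT accumulated errors (or right away on
-ERESTARTSYS in the interruptible path). A toy model of that policy; the
simulated failure pattern is invented:

    #include <stdio.h>

    #define VMW_RES_EVICT_ERR_COUNT 10
    #define EBUSY 16

    /* Invented stand-in: eviction fails transiently on the first tries. */
    static int do_evict(int attempt)
    {
            return attempt < 3 ? -EBUSY : 0;
    }

    int main(void)
    {
            unsigned err_count = 0;
            int attempt = 0;

            for (;;) {
                    int ret = do_evict(attempt++);

                    if (ret == 0) {
                            printf("evicted after %d attempts\n", attempt);
                            break;
                    }
                    /* Would list_add_tail() the resource back on the LRU
                     * here, then retry unless the budget is exhausted. */
                    if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
                            printf("giving up\n");
                            break;
                    }
            }
            return 0;
    }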