Commit 1e9124df authored by Dave Airlie

Merge tag 'drm-msm-fixes-2022-06-20' of https://gitlab.freedesktop.org/drm/msm into drm-fixes

Fixes for v5.19-rc4

- Workaround for Parade DSI bridge power sequencing
- Fix for multi-planar YUV format offsets
- Limiting WB modes to the max SSPP linewidth
- Fixing the supported rotations to add 180 back for IGT
- Fix to handle pm_runtime_get_sync() errors to avoid unclocked access
  in the bind() path of the dpu driver
- Fix the irq_free() without request issue which was being hit frequently
  in CI.
- Fix to add minimum ICC vote in the msm_mdss pm_resume path to address
  bootup splats
- Fix to avoid dereferencing without checking in WB encoder
- Fix to avoid crash during suspend in DP driver by ensuring interrupt
  mask bits are updated
- Remove unused code from dpu_encoder_virt_atomic_check()
- Fix to remove redundant init of dsc variable
- Fix to ensure mmap offset is initialized to avoid memory corruption
  from unpin/evict
- Fix double runpm disable in probe-defer path
- VMA fenced-unpin fixes
- Fix for WB max-width
- Fix for rare dp resolution change issue
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGvdsOF1-+WfTWyEyu33XPcvxOCU00G-dz7EF2J+fdyUHg@mail.gmail.com
parents 08d27daa a6e2af64
@@ -498,10 +498,15 @@ int adreno_hw_init(struct msm_gpu *gpu)
 		ring->cur = ring->start;
 		ring->next = ring->start;
-
-		/* reset completed fence seqno: */
-		ring->memptrs->fence = ring->fctx->completed_fence;
 		ring->memptrs->rptr = 0;
+
+		/* Detect and clean up an impossible fence, ie. if GPU managed
+		 * to scribble something invalid, we don't want that to confuse
+		 * us into mistakingly believing that submits have completed.
+		 */
+		if (fence_before(ring->fctx->last_fence, ring->memptrs->fence)) {
+			ring->memptrs->fence = ring->fctx->last_fence;
+		}
 	}
 
 	return 0;
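
The fence_before() check above relies on a wraparound-safe seqno comparison.
A minimal sketch of the idiom, assuming helpers equivalent to the
fence_after()/fence_before() pair the driver keeps in msm_gpu.h:

	/* Wraparound-safe 32-bit seqno ordering: casting the unsigned
	 * difference to int32_t keeps the comparison correct across a
	 * rollover of the sequence counter.
	 */
	static inline bool fence_after(uint32_t a, uint32_t b)
	{
		return (int32_t)(a - b) > 0;
	}

	static inline bool fence_before(uint32_t a, uint32_t b)
	{
		return fence_after(b, a);
	}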
@@ -1057,7 +1062,8 @@ void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
 	for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++)
 		release_firmware(adreno_gpu->fw[i]);
 
-	pm_runtime_disable(&priv->gpu_pdev->dev);
+	if (pm_runtime_enabled(&priv->gpu_pdev->dev))
+		pm_runtime_disable(&priv->gpu_pdev->dev);
 
 	msm_gpu_cleanup(&adreno_gpu->base);
 }
@@ -11,7 +11,14 @@ static int dpu_wb_conn_get_modes(struct drm_connector *connector)
 	struct msm_drm_private *priv = dev->dev_private;
 	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
 
-	return drm_add_modes_noedid(connector, dpu_kms->catalog->caps->max_linewidth,
+	/*
+	 * We should ideally be limiting the modes only to the maxlinewidth but
+	 * on some chipsets this will allow even 4k modes to be added which will
+	 * fail the per SSPP bandwidth checks. So, till we have dual-SSPP support
+	 * and source split support added lets limit the modes based on max_mixer_width
+	 * as 4K modes can then be supported.
+	 */
+	return drm_add_modes_noedid(connector, dpu_kms->catalog->caps->max_mixer_width,
 			dev->mode_config.max_height);
 }
...
@@ -216,6 +216,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
 		encoder = mdp4_lcdc_encoder_init(dev, panel_node);
 		if (IS_ERR(encoder)) {
 			DRM_DEV_ERROR(dev->dev, "failed to construct LCDC encoder\n");
+			of_node_put(panel_node);
 			return PTR_ERR(encoder);
 		}
@@ -225,6 +226,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
 		connector = mdp4_lvds_connector_init(dev, panel_node, encoder);
 		if (IS_ERR(connector)) {
 			DRM_DEV_ERROR(dev->dev, "failed to initialize LVDS connector\n");
+			of_node_put(panel_node);
 			return PTR_ERR(connector);
 		}
...
@@ -1534,6 +1534,8 @@ static int dp_ctrl_link_maintenance(struct dp_ctrl_private *ctrl)
 	return ret;
 }
 
+static int dp_ctrl_on_stream_phy_test_report(struct dp_ctrl *dp_ctrl);
+
 static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl)
 {
 	int ret = 0;
@@ -1557,7 +1559,7 @@ static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl)
 	ret = dp_ctrl_on_link(&ctrl->dp_ctrl);
 	if (!ret)
-		ret = dp_ctrl_on_stream(&ctrl->dp_ctrl);
+		ret = dp_ctrl_on_stream_phy_test_report(&ctrl->dp_ctrl);
 	else
 		DRM_ERROR("failed to enable DP link controller\n");
@@ -1813,7 +1815,27 @@ static int dp_ctrl_link_retrain(struct dp_ctrl_private *ctrl)
 	return dp_ctrl_setup_main_link(ctrl, &training_step);
 }
 
-int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl)
+static int dp_ctrl_on_stream_phy_test_report(struct dp_ctrl *dp_ctrl)
+{
+	int ret;
+	struct dp_ctrl_private *ctrl;
+
+	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+	ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
+
+	ret = dp_ctrl_enable_stream_clocks(ctrl);
+	if (ret) {
+		DRM_ERROR("Failed to start pixel clocks. ret=%d\n", ret);
+		return ret;
+	}
+
+	dp_ctrl_send_phy_test_pattern(ctrl);
+
+	return 0;
+}
+
+int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train)
 {
 	int ret = 0;
 	bool mainlink_ready = false;
@@ -1849,12 +1871,7 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl)
 		goto end;
 	}
 
-	if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) {
-		dp_ctrl_send_phy_test_pattern(ctrl);
-		return 0;
-	}
-
-	if (!dp_ctrl_channel_eq_ok(ctrl))
+	if (force_link_train || !dp_ctrl_channel_eq_ok(ctrl))
 		dp_ctrl_link_retrain(ctrl);
 
 	/* stop txing train pattern to end link training */
...
@@ -21,7 +21,7 @@ struct dp_ctrl {
 };
 
 int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl);
-int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl);
+int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train);
 int dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl);
 int dp_ctrl_off_link(struct dp_ctrl *dp_ctrl);
 int dp_ctrl_off(struct dp_ctrl *dp_ctrl);
...
@@ -309,7 +309,8 @@ static void dp_display_unbind(struct device *dev, struct device *master,
 	struct msm_drm_private *priv = dev_get_drvdata(master);
 
 	/* disable all HPD interrupts */
-	dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false);
+	if (dp->core_initialized)
+		dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false);
 
 	kthread_stop(dp->ev_tsk);
@@ -872,7 +873,7 @@ static int dp_display_enable(struct dp_display_private *dp, u32 data)
 		return 0;
 	}
 
-	rc = dp_ctrl_on_stream(dp->ctrl);
+	rc = dp_ctrl_on_stream(dp->ctrl, data);
 	if (!rc)
 		dp_display->power_on = true;
@@ -1659,6 +1660,7 @@ void dp_bridge_enable(struct drm_bridge *drm_bridge)
 	int rc = 0;
 	struct dp_display_private *dp_display;
 	u32 state;
+	bool force_link_train = false;
 
 	dp_display = container_of(dp, struct dp_display_private, dp_display);
 	if (!dp_display->dp_mode.drm_mode.clock) {
@@ -1693,10 +1695,12 @@ void dp_bridge_enable(struct drm_bridge *drm_bridge)
 	state = dp_display->hpd_state;
 
-	if (state == ST_DISPLAY_OFF)
+	if (state == ST_DISPLAY_OFF) {
 		dp_display_host_phy_init(dp_display);
+		force_link_train = true;
+	}
 
-	dp_display_enable(dp_display, 0);
+	dp_display_enable(dp_display, force_link_train);
 
 	rc = dp_display_post_enable(dp);
 	if (rc) {
@@ -1705,10 +1709,6 @@
 		dp_display_unprepare(dp);
 	}
 
-	/* manual kick off plug event to train link */
-	if (state == ST_DISPLAY_OFF)
-		dp_add_event(dp_display, EV_IRQ_HPD_INT, 0, 0);
-
 	/* completed connection */
 	dp_display->hpd_state = ST_CONNECTED;
...
@@ -964,7 +964,7 @@ static const struct drm_driver msm_driver = {
 	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 	.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
-	.gem_prime_mmap     = drm_gem_prime_mmap,
+	.gem_prime_mmap     = msm_gem_prime_mmap,
 #ifdef CONFIG_DEBUG_FS
 	.debugfs_init       = msm_debugfs_init,
 #endif
...
@@ -246,6 +246,7 @@ unsigned long msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_t
 void msm_gem_shrinker_init(struct drm_device *dev);
 void msm_gem_shrinker_cleanup(struct drm_device *dev);
 
+int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
 struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
 int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map);
 void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map);
...
@@ -46,12 +46,14 @@ bool msm_fence_completed(struct msm_fence_context *fctx, uint32_t fence)
 		(int32_t)(*fctx->fenceptr - fence) >= 0;
 }
 
-/* called from workqueue */
+/* called from irq handler and workqueue (in recover path) */
 void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence)
 {
-	spin_lock(&fctx->spinlock);
+	unsigned long flags;
+
+	spin_lock_irqsave(&fctx->spinlock, flags);
 	fctx->completed_fence = max(fence, fctx->completed_fence);
-	spin_unlock(&fctx->spinlock);
+	spin_unlock_irqrestore(&fctx->spinlock, flags);
 }
 
 struct msm_fence {
...
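
The switch to the irqsave variants matters because msm_update_fence() is now
reachable from hard-IRQ context as well as process context (see the msm_gpu.c
changes below). A minimal sketch of the locking rule, using a hypothetical
lock shared between the two contexts:

	static DEFINE_SPINLOCK(demo_lock);	/* hypothetical, for illustration */
	static u32 demo_completed;

	static void demo_update(u32 fence)
	{
		unsigned long flags;

		/* Process-context callers must disable local interrupts while
		 * holding the lock; otherwise an IRQ arriving on the same CPU
		 * could try to re-take the lock and deadlock.
		 */
		spin_lock_irqsave(&demo_lock, flags);
		demo_completed = max(fence, demo_completed);
		spin_unlock_irqrestore(&demo_lock, flags);
	}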
@@ -439,14 +439,12 @@ int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
 	return ret;
 }
 
-void msm_gem_unpin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
+void msm_gem_unpin_locked(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
 	GEM_WARN_ON(!msm_gem_is_locked(obj));
 
-	msm_gem_unpin_vma(vma);
-
 	msm_obj->pin_count--;
 	GEM_WARN_ON(msm_obj->pin_count < 0);
@@ -586,7 +584,8 @@ void msm_gem_unpin_iova(struct drm_gem_object *obj,
 	msm_gem_lock(obj);
 	vma = lookup_vma(obj, aspace);
 	if (!GEM_WARN_ON(!vma)) {
-		msm_gem_unpin_vma_locked(obj, vma);
+		msm_gem_unpin_vma(vma);
+		msm_gem_unpin_locked(obj);
 	}
 	msm_gem_unlock(obj);
 }
...
@@ -145,7 +145,7 @@ struct msm_gem_object {
 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
 int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma);
-void msm_gem_unpin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma);
+void msm_gem_unpin_locked(struct drm_gem_object *obj);
 struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
 					   struct msm_gem_address_space *aspace);
 int msm_gem_get_iova(struct drm_gem_object *obj,
@@ -377,10 +377,11 @@ struct msm_gem_submit {
 	} *cmd;  /* array of size nr_cmds */
 
 	struct {
 		/* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
 #define BO_VALID	0x8000	/* is current addr in cmdstream correct/valid? */
 #define BO_LOCKED	0x4000	/* obj lock is held */
 #define BO_ACTIVE	0x2000	/* active refcnt is held */
-#define BO_PINNED	0x1000	/* obj is pinned and on active list */
+#define BO_OBJ_PINNED	0x1000	/* obj (pages) is pinned and on active list */
+#define BO_VMA_PINNED	0x0800	/* vma (virtual address) is pinned */
 		uint32_t flags;
 		union {
 			struct msm_gem_object *obj;
...
@@ -11,6 +11,21 @@
 #include "msm_drv.h"
 #include "msm_gem.h"
 
+int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+	int ret;
+
+	/* Ensure the mmap offset is initialized.  We lazily initialize it,
+	 * so if it has not been first mmap'd directly as a GEM object, the
+	 * mmap offset will not be already initialized.
+	 */
+	ret = drm_gem_create_mmap_offset(obj);
+	if (ret)
+		return ret;
+
+	return drm_gem_prime_mmap(obj, vma);
+}
+
 struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
...
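
This is the changelog's "memory corruption from unpin/evict" fix: eviction
tears down userspace CPU mappings through the same offset node, so the node
must be valid even for buffers that were only ever mmap'd via a dma-buf fd.
A sketch of the dependent call (drm_vma_node_unmap() is the standard DRM
helper; the wrapper function here is hypothetical):

	static void demo_zap_user_mappings(struct drm_gem_object *obj)
	{
		/* If obj->vma_node was never initialized, this zap targets
		 * the wrong file offset and stale user PTEs survive eviction.
		 */
		drm_vma_node_unmap(&obj->vma_node,
				   obj->dev->anon_inode->i_mapping);
	}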
@@ -232,8 +232,11 @@ static void submit_cleanup_bo(struct msm_gem_submit *submit, int i,
 	 */
 	submit->bos[i].flags &= ~cleanup_flags;
 
-	if (flags & BO_PINNED)
-		msm_gem_unpin_vma_locked(obj, submit->bos[i].vma);
+	if (flags & BO_VMA_PINNED)
+		msm_gem_unpin_vma(submit->bos[i].vma);
+
+	if (flags & BO_OBJ_PINNED)
+		msm_gem_unpin_locked(obj);
 
 	if (flags & BO_ACTIVE)
 		msm_gem_active_put(obj);
@@ -244,7 +247,9 @@ static void submit_cleanup_bo(struct msm_gem_submit *submit, int i,
 static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
 {
-	submit_cleanup_bo(submit, i, BO_PINNED | BO_ACTIVE | BO_LOCKED);
+	unsigned cleanup_flags = BO_VMA_PINNED | BO_OBJ_PINNED |
+				 BO_ACTIVE | BO_LOCKED;
+	submit_cleanup_bo(submit, i, cleanup_flags);
 
 	if (!(submit->bos[i].flags & BO_VALID))
 		submit->bos[i].iova = 0;
@@ -375,7 +380,7 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
 		if (ret)
 			break;
 
-		submit->bos[i].flags |= BO_PINNED;
+		submit->bos[i].flags |= BO_OBJ_PINNED | BO_VMA_PINNED;
 		submit->bos[i].vma = vma;
 
 		if (vma->iova == submit->bos[i].iova) {
@@ -511,7 +516,7 @@ static void submit_cleanup(struct msm_gem_submit *submit, bool error)
 	unsigned i;
 
 	if (error)
-		cleanup_flags |= BO_PINNED | BO_ACTIVE;
+		cleanup_flags |= BO_VMA_PINNED | BO_OBJ_PINNED | BO_ACTIVE;
 
 	for (i = 0; i < submit->nr_bos; i++) {
 		struct msm_gem_object *msm_obj = submit->bos[i].obj;
@@ -529,7 +534,8 @@ void msm_submit_retire(struct msm_gem_submit *submit)
 		struct drm_gem_object *obj = &submit->bos[i].obj->base;
 
 		msm_gem_lock(obj);
-		submit_cleanup_bo(submit, i, BO_PINNED | BO_ACTIVE);
+		/* Note, VMA already fence-unpinned before submit: */
+		submit_cleanup_bo(submit, i, BO_OBJ_PINNED | BO_ACTIVE);
 		msm_gem_unlock(obj);
 
 		drm_gem_object_put(obj);
 	}
...
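
The point of splitting BO_PINNED is that the VMA pin and the page pin now
have different lifetimes. The lifecycle below is inferred from the hunks in
this commit (an illustration, not quoted code):

	/* submit_pin_objects(): both pins are taken together */
	submit->bos[i].flags |= BO_OBJ_PINNED | BO_VMA_PINNED;

	/* msm_job_run() (scheduler, see msm_ringbuffer.c below): the iova
	 * hold becomes fence-based, so only the VMA pin is dropped here */
	submit->bos[i].flags &= ~BO_VMA_PINNED;

	/* msm_submit_retire(): the pages stay pinned until retire */
	submit_cleanup_bo(submit, i, BO_OBJ_PINNED | BO_ACTIVE);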
@@ -62,8 +62,7 @@ void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
 	unsigned size = vma->node.size;
 
 	/* Print a message if we try to purge a vma in use */
-	if (GEM_WARN_ON(msm_gem_vma_inuse(vma)))
-		return;
+	GEM_WARN_ON(msm_gem_vma_inuse(vma));
 
 	/* Don't do anything if the memory isn't mapped */
 	if (!vma->mapped)
@@ -128,8 +127,7 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
 void msm_gem_close_vma(struct msm_gem_address_space *aspace,
 		struct msm_gem_vma *vma)
 {
-	if (GEM_WARN_ON(msm_gem_vma_inuse(vma) || vma->mapped))
-		return;
+	GEM_WARN_ON(msm_gem_vma_inuse(vma) || vma->mapped);
 
 	spin_lock(&aspace->lock);
 	if (vma->iova)
...
@@ -164,24 +164,6 @@ int msm_gpu_hw_init(struct msm_gpu *gpu)
 	return ret;
 }
 
-static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
-		uint32_t fence)
-{
-	struct msm_gem_submit *submit;
-	unsigned long flags;
-
-	spin_lock_irqsave(&ring->submit_lock, flags);
-	list_for_each_entry(submit, &ring->submits, node) {
-		if (fence_after(submit->seqno, fence))
-			break;
-
-		msm_update_fence(submit->ring->fctx,
-			submit->hw_fence->seqno);
-		dma_fence_signal(submit->hw_fence);
-	}
-	spin_unlock_irqrestore(&ring->submit_lock, flags);
-}
-
 #ifdef CONFIG_DEV_COREDUMP
 static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset,
 		size_t count, void *data, size_t datalen)
@@ -436,9 +418,9 @@ static void recover_worker(struct kthread_work *work)
 		 * one more to clear the faulting submit
 		 */
 		if (ring == cur_ring)
-			fence++;
+			ring->memptrs->fence = ++fence;
 
-		update_fences(gpu, ring, fence);
+		msm_update_fence(ring->fctx, fence);
 	}
 
 	if (msm_gpu_active(gpu)) {
@@ -672,7 +654,6 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
 	msm_submit_retire(submit);
 
 	pm_runtime_mark_last_busy(&gpu->pdev->dev);
-	pm_runtime_put_autosuspend(&gpu->pdev->dev);
 
 	spin_lock_irqsave(&ring->submit_lock, flags);
 	list_del(&submit->node);
@@ -686,6 +667,8 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
 	msm_devfreq_idle(gpu);
 	mutex_unlock(&gpu->active_lock);
 
+	pm_runtime_put_autosuspend(&gpu->pdev->dev);
+
 	msm_gem_submit_put(submit);
 }
@@ -735,7 +718,7 @@ void msm_gpu_retire(struct msm_gpu *gpu)
 	int i;
 
 	for (i = 0; i < gpu->nr_rings; i++)
-		update_fences(gpu, gpu->rb[i], gpu->rb[i]->memptrs->fence);
+		msm_update_fence(gpu->rb[i]->fctx, gpu->rb[i]->memptrs->fence);
 
 	kthread_queue_work(gpu->worker, &gpu->retire_work);
 	update_sw_cntrs(gpu);
...
@@ -58,7 +58,7 @@ static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
 	u64 addr = iova;
 	unsigned int i;
 
-	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+	for_each_sgtable_sg(sgt, sg, i) {
 		size_t size = sg->length;
 		phys_addr_t phys = sg_phys(sg);
...
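
The bug here is that sgt->nents counts DMA-mapped entries, which can be fewer
than the CPU-side entries once dma_map_sgtable() merges segments, so the old
loop could miss pages. for_each_sgtable_sg() walks the CPU-side list; its
definition in linux/scatterlist.h is effectively:

	#define for_each_sgtable_sg(sgt, sg, i) \
		for_each_sg((sgt)->sgl, sg, (sgt)->orig_nents, i)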
@@ -25,7 +25,7 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)
 		msm_gem_lock(obj);
 		msm_gem_unpin_vma_fenced(submit->bos[i].vma, fctx);
-		submit->bos[i].flags &= ~BO_PINNED;
+		submit->bos[i].flags &= ~BO_VMA_PINNED;
 		msm_gem_unlock(obj);
 	}
...