Commit 3a3c5ab3 authored by Linus Torvalds

Merge tag 'drm-fixes-2021-06-04-1' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Two big regression reverts in here, one for fbdev and one i915.
  Otherwise it's mostly amdgpu display fixes, and tegra fixes.

  fb:
   - revert broken fb_defio patch

  amdgpu:
   - Display fixes
   - FRU EEPROM error handling fix
   - RAS fix
   - PSP fix
   - Releasing pinned BO fix

  i915:
   - Revert conversion to io_mapping_map_user() which led to a BUG_ON()
   - Fix check for error valued returns in a selftest

  tegra:
   - SOR power domain race condition fix
   - build warning fix
   - runtime pm ref leak fix
   - modifier fix"

* tag 'drm-fixes-2021-06-04-1' of git://anongit.freedesktop.org/drm/drm:
  amd/display: convert DRM_DEBUG_ATOMIC to drm_dbg_atomic
  drm/amdgpu: make sure we unpin the UVD BO
  drm/amd/amdgpu:save psp ring wptr to avoid attack
  drm/amd/display: Fix potential memory leak in DMUB hw_init
  drm/amdgpu: Don't query CE and UE errors
  drm/amd/display: Fix overlay validation by considering cursors
  drm/amdgpu: refine amdgpu_fru_get_product_info
  drm/amdgpu: add judgement for dc support
  drm/amd/display: Fix GPU scaling regression by FS video support
  drm/amd/display: Allow bandwidth validation for 0 streams.
  Revert "i915: use io_mapping_map_user"
  drm/i915/selftests: Fix return value check in live_breadcrumbs_smoketest()
  Revert "fb_defio: Remove custom address_space_operations"
  drm/tegra: Correct DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT
  drm/tegra: sor: Fix AUX device reference leak
  drm/tegra: Get ref for DP AUX channel, not its ddc adapter
  drm/tegra: Fix shift overflow in tegra_shared_plane_atomic_update
  drm/tegra: sor: Fully initialize SOR before registration
  gpu: host1x: Split up client initalization and registration
  drm/tegra: sor: Do not leak runtime PM reference
parents f88cd3fb 37e2f2e8
@@ -337,7 +337,6 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
 {
 	struct amdgpu_ctx *ctx;
 	struct amdgpu_ctx_mgr *mgr;
-	unsigned long ras_counter;
 
 	if (!fpriv)
 		return -EINVAL;
@@ -362,21 +361,6 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
 	if (atomic_read(&ctx->guilty))
 		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;
 
-	/*query ue count*/
-	ras_counter = amdgpu_ras_query_error_count(adev, false);
-	/*ras counter is monotonic increasing*/
-	if (ras_counter != ctx->ras_counter_ue) {
-		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
-		ctx->ras_counter_ue = ras_counter;
-	}
-
-	/*query ce count*/
-	ras_counter = amdgpu_ras_query_error_count(adev, true);
-	if (ras_counter != ctx->ras_counter_ce) {
-		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
-		ctx->ras_counter_ce = ras_counter;
-	}
-
 	mutex_unlock(&mgr->lock);
 	return 0;
 }
......
@@ -3118,7 +3118,9 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
  */
 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
 {
-	if (amdgpu_sriov_vf(adev) || adev->enable_virtual_display)
+	if (amdgpu_sriov_vf(adev) ||
+	    adev->enable_virtual_display ||
+	    (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
 		return false;
 
 	return amdgpu_device_asic_has_dc_support(adev->asic_type);
......
@@ -101,7 +101,8 @@ static int amdgpu_fru_read_eeprom(struct amdgpu_device *adev, uint32_t addrptr,
 int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
 {
 	unsigned char buff[34];
-	int addrptr = 0, size = 0;
+	int addrptr, size;
+	int len;
 
 	if (!is_fru_eeprom_supported(adev))
 		return 0;
@@ -109,7 +110,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
 	/* If algo exists, it means that the i2c_adapter's initialized */
 	if (!adev->pm.smu_i2c.algo) {
 		DRM_WARN("Cannot access FRU, EEPROM accessor not initialized");
-		return 0;
+		return -ENODEV;
 	}
 
 	/* There's a lot of repetition here. This is due to the FRU having
@@ -128,7 +129,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
 	size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
 	if (size < 1) {
 		DRM_ERROR("Failed to read FRU Manufacturer, ret:%d", size);
-		return size;
+		return -EINVAL;
 	}
 
 	/* Increment the addrptr by the size of the field, and 1 due to the
@@ -138,43 +139,45 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
 	size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
 	if (size < 1) {
 		DRM_ERROR("Failed to read FRU product name, ret:%d", size);
-		return size;
+		return -EINVAL;
 	}
 
+	len = size;
 	/* Product name should only be 32 characters. Any more,
 	 * and something could be wrong. Cap it at 32 to be safe
 	 */
-	if (size > 32) {
+	if (len >= sizeof(adev->product_name)) {
 		DRM_WARN("FRU Product Number is larger than 32 characters. This is likely a mistake");
-		size = 32;
+		len = sizeof(adev->product_name) - 1;
 	}
 	/* Start at 2 due to buff using fields 0 and 1 for the address */
-	memcpy(adev->product_name, &buff[2], size);
-	adev->product_name[size] = '\0';
+	memcpy(adev->product_name, &buff[2], len);
+	adev->product_name[len] = '\0';
 	addrptr += size + 1;
 	size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
 	if (size < 1) {
 		DRM_ERROR("Failed to read FRU product number, ret:%d", size);
-		return size;
+		return -EINVAL;
 	}
 
+	len = size;
 	/* Product number should only be 16 characters. Any more,
 	 * and something could be wrong. Cap it at 16 to be safe
 	 */
-	if (size > 16) {
+	if (len >= sizeof(adev->product_number)) {
 		DRM_WARN("FRU Product Number is larger than 16 characters. This is likely a mistake");
-		size = 16;
+		len = sizeof(adev->product_number) - 1;
 	}
-	memcpy(adev->product_number, &buff[2], size);
-	adev->product_number[size] = '\0';
+	memcpy(adev->product_number, &buff[2], len);
+	adev->product_number[len] = '\0';
 
 	addrptr += size + 1;
 	size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
 
 	if (size < 1) {
 		DRM_ERROR("Failed to read FRU product version, ret:%d", size);
-		return size;
+		return -EINVAL;
 	}
 
 	addrptr += size + 1;
@@ -182,18 +185,19 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
 
 	if (size < 1) {
 		DRM_ERROR("Failed to read FRU serial number, ret:%d", size);
-		return size;
+		return -EINVAL;
 	}
 
+	len = size;
 	/* Serial number should only be 16 characters. Any more,
 	 * and something could be wrong. Cap it at 16 to be safe
 	 */
-	if (size > 16) {
+	if (len >= sizeof(adev->serial)) {
 		DRM_WARN("FRU Serial Number is larger than 16 characters. This is likely a mistake");
-		size = 16;
+		len = sizeof(adev->serial) - 1;
 	}
-	memcpy(adev->serial, &buff[2], size);
-	adev->serial[size] = '\0';
+	memcpy(adev->serial, &buff[2], len);
+	adev->serial[len] = '\0';
 
 	return 0;
 }
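
The FRU change above applies one pattern three times: cap a length-prefixed field at the destination buffer's capacity, always NUL-terminate, and still advance the read pointer by the field's full on-EEPROM size. A standalone sketch of that pattern (fru_copy_field() is a hypothetical helper, not a kernel function):

#include <stddef.h>
#include <string.h>

/* Copy a length-prefixed field into a fixed buffer: clamp to
 * dst_size - 1, always NUL-terminate, and report the field's full
 * length so the caller can still advance its read pointer correctly.
 */
static size_t fru_copy_field(char *dst, size_t dst_size,
			     const unsigned char *field, size_t field_len)
{
	size_t len = field_len;

	if (len >= dst_size)		/* leave room for the terminator */
		len = dst_size - 1;

	memcpy(dst, field, len);
	dst[len] = '\0';

	return field_len;		/* not len: the EEPROM layout is unchanged */
}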
@@ -76,6 +76,7 @@ struct psp_ring
 	uint64_t			ring_mem_mc_addr;
 	void				*ring_mem_handle;
 	uint32_t			ring_size;
+	uint32_t			ring_wptr;
 };
 
 /* More registers may will be supported */
......
@@ -720,7 +720,7 @@ static uint32_t psp_v11_0_ring_get_wptr(struct psp_context *psp)
 	struct amdgpu_device *adev = psp->adev;
 
 	if (amdgpu_sriov_vf(adev))
-		data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
+		data = psp->km_ring.ring_wptr;
 	else
 		data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
 
@@ -734,6 +734,7 @@ static void psp_v11_0_ring_set_wptr(struct psp_context *psp, uint32_t value)
 	if (amdgpu_sriov_vf(adev)) {
 		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value);
 		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD);
+		psp->km_ring.ring_wptr = value;
 	} else
 		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value);
 }
......
@@ -379,7 +379,7 @@ static uint32_t psp_v3_1_ring_get_wptr(struct psp_context *psp)
 	struct amdgpu_device *adev = psp->adev;
 
 	if (amdgpu_sriov_vf(adev))
-		data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
+		data = psp->km_ring.ring_wptr;
 	else
 		data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
 
 	return data;
@@ -394,6 +394,7 @@ static void psp_v3_1_ring_set_wptr(struct psp_context *psp, uint32_t value)
 		/* send interrupt to PSP for SRIOV ring write pointer update */
 		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
 			GFX_CTRL_CMD_ID_CONSUME_CMD);
+		psp->km_ring.ring_wptr = value;
 	} else
 		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value);
 }
......
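
Both PSP hunks make the same hardening change: under SR-IOV the C2PMSG_102 scratch register used for the ring write pointer is writable from outside the driver, so its readback can no longer be trusted; the driver now keeps a private shadow and only ever reads that. The idea in miniature (a sketch, not the kernel code):

#include <stdint.h>

struct ring {
	uint32_t wptr;			/* driver-private shadow */
};

/* Write-through: update the hardware doorbell and the shadow together. */
static void ring_set_wptr(struct ring *ring, volatile uint32_t *doorbell,
			  uint32_t value)
{
	*doorbell = value;
	ring->wptr = value;
}

/* Read only the shadow: a register that another agent can also write is
 * not a trustworthy source for a pointer the driver will later index with.
 */
static uint32_t ring_get_wptr(const struct ring *ring)
{
	return ring->wptr;
}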
@@ -357,6 +357,7 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 error:
 	dma_fence_put(fence);
+	amdgpu_bo_unpin(bo);
 	amdgpu_bo_unreserve(bo);
 	amdgpu_bo_unref(&bo);
 	return r;
......
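
The one-line UVD fix restores acquire/release symmetry: the test buffer is created with amdgpu_bo_create_reserved(), which leaves the BO reserved and pinned, so the error path has to undo all three acquisitions, not just two. The fixed teardown order as a schematic sketch (test_cleanup() is a hypothetical name):

static void test_cleanup(struct amdgpu_bo *bo, struct dma_fence *fence)
{
	dma_fence_put(fence);		/* fence reference from the submission */
	amdgpu_bo_unpin(bo);		/* pin taken at creation -- this was the leak */
	amdgpu_bo_unreserve(bo);	/* reservation taken at creation */
	amdgpu_bo_unref(&bo);		/* final reference; BO can now be freed */
}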
@@ -925,6 +925,7 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
 	}
 
-	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
+	if (!adev->dm.dc->ctx->dmub_srv)
+		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
 
 	if (!adev->dm.dc->ctx->dmub_srv) {
 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
@@ -1954,7 +1955,6 @@ static int dm_suspend(void *handle)
 
 	amdgpu_dm_irq_suspend(adev);
 
-
 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
 
 	return 0;
@@ -5500,7 +5500,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 	struct drm_display_mode saved_mode;
 	struct drm_display_mode *freesync_mode = NULL;
 	bool native_mode_found = false;
-	bool recalculate_timing = dm_state ? (dm_state->scaling != RMX_OFF) : false;
+	bool recalculate_timing = false;
+	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
 	int mode_refresh;
 	int preferred_refresh = 0;
 #if defined(CONFIG_DRM_AMD_DC_DCN)
@@ -5563,7 +5564,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 		 */
 		DRM_DEBUG_DRIVER("No preferred mode found\n");
 	} else {
-		recalculate_timing |= amdgpu_freesync_vid_mode &&
+		recalculate_timing = amdgpu_freesync_vid_mode &&
 				 is_freesync_video_mode(&mode, aconnector);
 		if (recalculate_timing) {
 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
@@ -5571,12 +5572,11 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 			mode = *freesync_mode;
 		} else {
 			decide_crtc_timing_for_drm_display_mode(
-				&mode, preferred_mode,
-				dm_state ? (dm_state->scaling != RMX_OFF) : false);
-		}
+				&mode, preferred_mode, scale);
 
-		preferred_refresh = drm_mode_vrefresh(preferred_mode);
+			preferred_refresh = drm_mode_vrefresh(preferred_mode);
+		}
 	}
 
 	if (recalculate_timing)
 		drm_mode_set_crtcinfo(&saved_mode, 0);
@@ -5587,7 +5587,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 	 * If scaling is enabled and refresh rate didn't change
 	 * we copy the vic and polarities of the old timings
 	 */
-	if (!recalculate_timing || mode_refresh != preferred_refresh)
+	if (!scale || mode_refresh != preferred_refresh)
 		fill_stream_properties_from_drm_display_mode(
 			stream, &mode, &aconnector->base, con_state, NULL,
 			requested_bpc);
@@ -9854,7 +9854,7 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
 	if (cursor_scale_w != primary_scale_w ||
 	    cursor_scale_h != primary_scale_h) {
-		DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
+		drm_dbg_atomic(crtc->dev, "Cursor plane scaling doesn't match primary plane\n");
 		return -EINVAL;
 	}
@@ -9891,7 +9891,7 @@ static int validate_overlay(struct drm_atomic_state *state)
 	int i;
 	struct drm_plane *plane;
 	struct drm_plane_state *old_plane_state, *new_plane_state;
-	struct drm_plane_state *primary_state, *overlay_state = NULL;
+	struct drm_plane_state *primary_state, *cursor_state, *overlay_state = NULL;
 
 	/* Check if primary plane is contained inside overlay */
 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
@@ -9921,6 +9921,14 @@ static int validate_overlay(struct drm_atomic_state *state)
 	if (!primary_state->crtc)
 		return 0;
 
+	/* check if cursor plane is enabled */
+	cursor_state = drm_atomic_get_plane_state(state, overlay_state->crtc->cursor);
+	if (IS_ERR(cursor_state))
+		return PTR_ERR(cursor_state);
+
+	if (drm_atomic_plane_disabling(plane->state, cursor_state))
+		return 0;
+
 	/* Perform the bounds check to ensure the overlay plane covers the primary */
 	if (primary_state->crtc_x < overlay_state->crtc_x ||
 	    primary_state->crtc_y < overlay_state->crtc_y ||
......
@@ -3236,7 +3236,7 @@ static noinline bool dcn20_validate_bandwidth_fp(struct dc *dc,
 	voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false);
 	dummy_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
 
-	if (voltage_supported && dummy_pstate_supported) {
+	if (voltage_supported && (dummy_pstate_supported || !(context->stream_count))) {
 		context->bw_ctx.bw.dcn.clk.p_state_change_support = false;
 		goto restore_dml_state;
 	}
......
@@ -20,7 +20,6 @@ config DRM_I915
 	select INPUT if ACPI
 	select ACPI_VIDEO if ACPI
 	select ACPI_BUTTON if ACPI
-	select IO_MAPPING
 	select SYNC_FILE
 	select IOSF_MBI
 	select CRC32
......
@@ -367,10 +367,11 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
 		goto err_unpin;
 
 	/* Finally, remap it using the new GTT offset */
-	ret = io_mapping_map_user(&ggtt->iomap, area, area->vm_start +
-			(vma->ggtt_view.partial.offset << PAGE_SHIFT),
-			(ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
-			min_t(u64, vma->size, area->vm_end - area->vm_start));
+	ret = remap_io_mapping(area,
+			       area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
+			       (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
+			       min_t(u64, vma->size, area->vm_end - area->vm_start),
+			       &ggtt->iomap);
 	if (ret)
 		goto err_fence;
......
@@ -1905,6 +1905,9 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file);
 
 /* i915_mm.c */
+int remap_io_mapping(struct vm_area_struct *vma,
+		     unsigned long addr, unsigned long pfn, unsigned long size,
+		     struct io_mapping *iomap);
 int remap_io_sg(struct vm_area_struct *vma,
 		unsigned long addr, unsigned long size,
 		struct scatterlist *sgl, resource_size_t iobase);
......
@@ -37,6 +37,17 @@ struct remap_pfn {
 	resource_size_t iobase;
 };
 
+static int remap_pfn(pte_t *pte, unsigned long addr, void *data)
+{
+	struct remap_pfn *r = data;
+
+	/* Special PTE are not associated with any struct page */
+	set_pte_at(r->mm, addr, pte, pte_mkspecial(pfn_pte(r->pfn, r->prot)));
+	r->pfn++;
+
+	return 0;
+}
+
 #define use_dma(io) ((io) != -1)
 
 static inline unsigned long sgt_pfn(const struct remap_pfn *r)
@@ -66,7 +77,40 @@ static int remap_sg(pte_t *pte, unsigned long addr, void *data)
 	return 0;
 }
 
+/**
+ * remap_io_mapping - remap an IO mapping to userspace
+ * @vma: user vma to map to
+ * @addr: target user address to start at
+ * @pfn: physical address of kernel memory
+ * @size: size of map area
+ * @iomap: the source io_mapping
+ *
+ * Note: this is only safe if the mm semaphore is held when called.
+ */
+int remap_io_mapping(struct vm_area_struct *vma,
+		     unsigned long addr, unsigned long pfn, unsigned long size,
+		     struct io_mapping *iomap)
+{
+	struct remap_pfn r;
+	int err;
+
 #define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)
+	GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);
+
+	/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
+	r.mm = vma->vm_mm;
+	r.pfn = pfn;
+	r.prot = __pgprot((pgprot_val(iomap->prot) & _PAGE_CACHE_MASK) |
+			  (pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK));
+
+	err = apply_to_page_range(r.mm, addr, size, remap_pfn, &r);
+	if (unlikely(err)) {
+		zap_vma_ptes(vma, addr, (r.pfn - pfn) << PAGE_SHIFT);
+		return err;
+	}
+
+	return 0;
+}
+
 /**
  * remap_io_sg - remap an IO mapping to userspace
......
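
Two details make the restored helper work: apply_to_page_range() pre-allocates the page tables for the span and invokes the callback once per PTE slot, and pte_mkspecial() marks each entry as having no backing struct page, so the core VM never tries to refcount it. On a mid-span failure, r.pfn has advanced exactly one step per written PTE, which is what lets zap_vma_ptes() compute the partial length to tear down. For reference, the callback contract as declared in include/linux/mm.h of this kernel series:

typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data);

int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
			unsigned long size, pte_fn_t fn, void *data);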
@@ -1592,8 +1592,8 @@ static int live_breadcrumbs_smoketest(void *arg)
 	for (n = 0; n < smoke[0].ncontexts; n++) {
 		smoke[0].contexts[n] = live_context(i915, file);
-		if (!smoke[0].contexts[n]) {
-			ret = -ENOMEM;
+		if (IS_ERR(smoke[0].contexts[n])) {
+			ret = PTR_ERR(smoke[0].contexts[n]);
 			goto out_contexts;
 		}
 	}
......
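
The selftest fix is a common ERR_PTR pitfall: live_context() signals failure through the pointer itself (an encoded errno), never by returning NULL, so the old NULL check could pass an error pointer straight through to a later dereference. The convention in miniature (get_thing() is a hypothetical stand-in):

#include <linux/err.h>

struct thing *get_thing(void);	/* returns a valid pointer or ERR_PTR(-E...) */

static int use_thing(void)
{
	struct thing *t = get_thing();

	if (IS_ERR(t))			/* not: if (!t) */
		return PTR_ERR(t);	/* propagate the encoded errno */

	/* ... use t ... */
	return 0;
}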
@@ -25,7 +25,7 @@
 #include "trace.h"
 
 /* XXX move to include/uapi/drm/drm_fourcc.h? */
-#define DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT BIT(22)
+#define DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT BIT_ULL(22)
 
 struct reset_control;
......
@@ -510,7 +510,7 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
 	 * dGPU sector layout.
 	 */
 	if (tegra_plane_state->tiling.sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU)
-		base |= BIT(39);
+		base |= BIT_ULL(39);
 #endif
 
 	tegra_plane_writel(p, tegra_plane_state->format, DC_WIN_COLOR_DEPTH);
......
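
The two tegra one-liners are the same class of bug: BIT() expands to 1UL << n, and on 32-bit targets unsigned long is 32 bits wide. BIT(39) therefore shifts past the width of the type, which is undefined behaviour, and even an in-range bit goes wrong once the mask is complemented, because the 32-bit result zero-extends into a 64-bit value and wipes the high word. With the kernel's definitions:

#define BIT(nr)		(1UL << (nr))	/* width of unsigned long */
#define BIT_ULL(nr)	(1ULL << (nr))	/* always 64-bit */

/* On a 32-bit target:
 *   BIT(39)       -- shift >= type width: undefined behaviour
 *   ~BIT(22)      -- 0xffbfffff, zero-extends to 0x00000000ffbfffff
 *   ~BIT_ULL(22)  -- 0xffffffffffbfffff, clears only bit 22
 */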
@@ -3125,21 +3125,21 @@ static int tegra_sor_init(struct host1x_client *client)
 		if (err < 0) {
 			dev_err(sor->dev, "failed to acquire SOR reset: %d\n",
 				err);
-			return err;
+			goto rpm_put;
 		}
 
 		err = reset_control_assert(sor->rst);
 		if (err < 0) {
 			dev_err(sor->dev, "failed to assert SOR reset: %d\n",
 				err);
-			return err;
+			goto rpm_put;
 		}
 	}
 
 	err = clk_prepare_enable(sor->clk);
 	if (err < 0) {
 		dev_err(sor->dev, "failed to enable clock: %d\n", err);
-		return err;
+		goto rpm_put;
 	}
 
 	usleep_range(1000, 3000);
@@ -3150,7 +3150,7 @@ static int tegra_sor_init(struct host1x_client *client)
 			dev_err(sor->dev, "failed to deassert SOR reset: %d\n",
 				err);
 			clk_disable_unprepare(sor->clk);
-			return err;
+			goto rpm_put;
 		}
 
 		reset_control_release(sor->rst);
@@ -3171,6 +3171,12 @@ static int tegra_sor_init(struct host1x_client *client)
 	}
 
 	return 0;
+
+rpm_put:
+	if (sor->rst)
+		pm_runtime_put(sor->dev);
+
+	return err;
 }
 
 static int tegra_sor_exit(struct host1x_client *client)
@@ -3739,12 +3745,8 @@ static int tegra_sor_probe(struct platform_device *pdev)
 		if (!sor->aux)
 			return -EPROBE_DEFER;
 
-		if (get_device(&sor->aux->ddc.dev)) {
-			if (try_module_get(sor->aux->ddc.owner))
-				sor->output.ddc = &sor->aux->ddc;
-			else
-				put_device(&sor->aux->ddc.dev);
-		}
+		if (get_device(sor->aux->dev))
+			sor->output.ddc = &sor->aux->ddc;
 	}
 
 	if (!sor->aux) {
@@ -3772,12 +3774,13 @@ static int tegra_sor_probe(struct platform_device *pdev)
 
 	err = tegra_sor_parse_dt(sor);
 	if (err < 0)
-		return err;
+		goto put_aux;
 
 	err = tegra_output_probe(&sor->output);
-	if (err < 0)
-		return dev_err_probe(&pdev->dev, err,
-				     "failed to probe output\n");
+	if (err < 0) {
+		dev_err_probe(&pdev->dev, err, "failed to probe output\n");
+		goto put_aux;
+	}
 
 	if (sor->ops && sor->ops->probe) {
 		err = sor->ops->probe(sor);
@@ -3916,17 +3919,10 @@ static int tegra_sor_probe(struct platform_device *pdev)
 	platform_set_drvdata(pdev, sor);
 	pm_runtime_enable(&pdev->dev);
 
-	INIT_LIST_HEAD(&sor->client.list);
+	host1x_client_init(&sor->client);
 	sor->client.ops = &sor_client_ops;
 	sor->client.dev = &pdev->dev;
 
-	err = host1x_client_register(&sor->client);
-	if (err < 0) {
-		dev_err(&pdev->dev, "failed to register host1x client: %d\n",
-			err);
-		goto rpm_disable;
-	}
-
 	/*
 	 * On Tegra210 and earlier, provide our own implementation for the
 	 * pad output clock.
@@ -3938,13 +3934,13 @@ static int tegra_sor_probe(struct platform_device *pdev)
 			  sor->index);
 	if (!name) {
 		err = -ENOMEM;
-		goto unregister;
+		goto uninit;
 	}
 
 	err = host1x_client_resume(&sor->client);
 	if (err < 0) {
 		dev_err(sor->dev, "failed to resume: %d\n", err);
-		goto unregister;
+		goto uninit;
 	}
 
 	sor->clk_pad = tegra_clk_sor_pad_register(sor, name);
@@ -3955,17 +3951,30 @@ static int tegra_sor_probe(struct platform_device *pdev)
 		err = PTR_ERR(sor->clk_pad);
 		dev_err(sor->dev, "failed to register SOR pad clock: %d\n",
 			err);
-		goto unregister;
+		goto uninit;
+	}
+
+	err = __host1x_client_register(&sor->client);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to register host1x client: %d\n",
+			err);
+		goto uninit;
 	}
 
 	return 0;
 
-unregister:
-	host1x_client_unregister(&sor->client);
-rpm_disable:
+uninit:
+	host1x_client_exit(&sor->client);
 	pm_runtime_disable(&pdev->dev);
 remove:
+	if (sor->aux)
+		sor->output.ddc = NULL;
+
 	tegra_output_remove(&sor->output);
+put_aux:
+	if (sor->aux)
+		put_device(sor->aux->dev);
+
 	return err;
 }
 
@@ -3983,6 +3992,11 @@ static int tegra_sor_remove(struct platform_device *pdev)
 
 	pm_runtime_disable(&pdev->dev);
 
+	if (sor->aux) {
+		put_device(sor->aux->dev);
+		sor->output.ddc = NULL;
+	}
+
 	tegra_output_remove(&sor->output);
 
 	return 0;
......
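
Most of the sor.c churn converts early returns into cascading cleanup labels, so that every failure exit in tegra_sor_init() and tegra_sor_probe() releases exactly what was acquired before the failure, in reverse order. The skeleton of that pattern (a schematic sketch; the acquire_*/release_* names are placeholders):

int acquire_a(void), acquire_b(void), acquire_c(void);
void release_a(void), release_b(void);

static int probe(void)
{
	int err;

	err = acquire_a();
	if (err < 0)
		return err;		/* nothing to undo yet */

	err = acquire_b();
	if (err < 0)
		goto err_a;

	err = acquire_c();
	if (err < 0)
		goto err_b;

	return 0;

err_b:
	release_b();
err_a:
	release_a();
	return err;
}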
@@ -735,6 +735,29 @@ void host1x_driver_unregister(struct host1x_driver *driver)
 }
 EXPORT_SYMBOL(host1x_driver_unregister);
 
+/**
+ * __host1x_client_init() - initialize a host1x client
+ * @client: host1x client
+ * @key: lock class key for the client-specific mutex
+ */
+void __host1x_client_init(struct host1x_client *client, struct lock_class_key *key)
+{
+	INIT_LIST_HEAD(&client->list);
+	__mutex_init(&client->lock, "host1x client lock", key);
+	client->usecount = 0;
+}
+EXPORT_SYMBOL(__host1x_client_init);
+
+/**
+ * host1x_client_exit() - uninitialize a host1x client
+ * @client: host1x client
+ */
+void host1x_client_exit(struct host1x_client *client)
+{
+	mutex_destroy(&client->lock);
+}
+EXPORT_SYMBOL(host1x_client_exit);
+
 /**
  * __host1x_client_register() - register a host1x client
  * @client: host1x client
@@ -747,16 +770,11 @@ EXPORT_SYMBOL(host1x_driver_unregister);
  * device and call host1x_device_init(), which will in turn call each client's
  * &host1x_client_ops.init implementation.
  */
-int __host1x_client_register(struct host1x_client *client,
-			     struct lock_class_key *key)
+int __host1x_client_register(struct host1x_client *client)
 {
 	struct host1x *host1x;
 	int err;
 
-	INIT_LIST_HEAD(&client->list);
-	__mutex_init(&client->lock, "host1x client lock", key);
-	client->usecount = 0;
-
 	mutex_lock(&devices_lock);
 
 	list_for_each_entry(host1x, &devices, list) {
......
@@ -52,6 +52,13 @@ static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf)
 		return VM_FAULT_SIGBUS;
 
 	get_page(page);
+
+	if (vmf->vma->vm_file)
+		page->mapping = vmf->vma->vm_file->f_mapping;
+	else
+		printk(KERN_ERR "no mapping available\n");
+
+	BUG_ON(!page->mapping);
 	page->index = vmf->pgoff;
 
 	vmf->page = page;
@@ -144,6 +151,17 @@ static const struct vm_operations_struct fb_deferred_io_vm_ops = {
 	.page_mkwrite	= fb_deferred_io_mkwrite,
 };
 
+static int fb_deferred_io_set_page_dirty(struct page *page)
+{
+	if (!PageDirty(page))
+		SetPageDirty(page);
+	return 0;
+}
+
+static const struct address_space_operations fb_deferred_io_aops = {
+	.set_page_dirty = fb_deferred_io_set_page_dirty,
+};
+
 int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
 {
 	vma->vm_ops = &fb_deferred_io_vm_ops;
@@ -194,12 +212,29 @@ void fb_deferred_io_init(struct fb_info *info)
 }
 EXPORT_SYMBOL_GPL(fb_deferred_io_init);
 
+void fb_deferred_io_open(struct fb_info *info,
+			 struct inode *inode,
+			 struct file *file)
+{
+	file->f_mapping->a_ops = &fb_deferred_io_aops;
+}
+EXPORT_SYMBOL_GPL(fb_deferred_io_open);
+
 void fb_deferred_io_cleanup(struct fb_info *info)
 {
 	struct fb_deferred_io *fbdefio = info->fbdefio;
+	struct page *page;
+	int i;
 
 	BUG_ON(!fbdefio);
 	cancel_delayed_work_sync(&info->deferred_work);
 
+	/* clear out the mapping that we setup */
+	for (i = 0 ; i < info->fix.smem_len; i += PAGE_SIZE) {
+		page = fb_deferred_io_page(info, i);
+		page->mapping = NULL;
+	}
+
 	mutex_destroy(&fbdefio->lock);
 }
 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
@@ -1415,6 +1415,10 @@ __releases(&info->lock)
 		if (res)
 			module_put(info->fbops->owner);
 	}
+#ifdef CONFIG_FB_DEFERRED_IO
+	if (info->fbdefio)
+		fb_deferred_io_open(info, inode, file);
+#endif
 out:
 	unlock_fb_info(info);
 	if (res)
......
@@ -659,6 +659,9 @@ static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch,
 /* drivers/video/fb_defio.c */
 int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma);
 extern void fb_deferred_io_init(struct fb_info *info);
+extern void fb_deferred_io_open(struct fb_info *info,
+				struct inode *inode,
+				struct file *file);
 extern void fb_deferred_io_cleanup(struct fb_info *info);
 extern int fb_deferred_io_fsync(struct file *file, loff_t start,
 				loff_t end, int datasync);
......
@@ -332,12 +332,30 @@ static inline struct host1x_device *to_host1x_device(struct device *dev)
 int host1x_device_init(struct host1x_device *device);
 int host1x_device_exit(struct host1x_device *device);
 
-int __host1x_client_register(struct host1x_client *client,
-			     struct lock_class_key *key);
-#define host1x_client_register(class) \
+void __host1x_client_init(struct host1x_client *client, struct lock_class_key *key);
+void host1x_client_exit(struct host1x_client *client);
+
+#define host1x_client_init(client)			\
+	({						\
+		static struct lock_class_key __key;	\
+		__host1x_client_init(client, &__key);	\
+	})
+
+int __host1x_client_register(struct host1x_client *client);
+
+/*
+ * Note that this wrapper calls __host1x_client_init() for compatibility
+ * with existing callers. Callers that want to separately initialize and
+ * register a host1x client must first initialize using either of the
+ * __host1x_client_init() or host1x_client_init() functions and then use
+ * the low-level __host1x_client_register() function to avoid the client
+ * getting reinitialized.
+ */
+#define host1x_client_register(client)			\
 	({ \
 		static struct lock_class_key __key; \
-		__host1x_client_register(class, &__key); \
+		__host1x_client_init(client, &__key);	\
+		__host1x_client_register(client);	\
 	})
 
 int host1x_client_unregister(struct host1x_client *client);
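
Concretely, the calling convention the new comment describes looks like this in a client driver (a sketch modelled on the tegra_sor_probe() rework above; the foo_* names are hypothetical):

static const struct host1x_client_ops foo_client_ops;
int foo_setup(struct platform_device *pdev);
void foo_teardown(struct platform_device *pdev);

static int foo_probe(struct platform_device *pdev,
		     struct host1x_client *client)
{
	int err;

	host1x_client_init(client);	/* one-time init, own lockdep key */
	client->ops = &foo_client_ops;
	client->dev = &pdev->dev;

	err = foo_setup(pdev);		/* may already rely on the client */
	if (err < 0)
		goto exit_client;

	/* Low-level register: does not re-run the initialization. */
	err = __host1x_client_register(client);
	if (err < 0)
		goto teardown;

	return 0;

teardown:
	foo_teardown(pdev);
exit_client:
	host1x_client_exit(client);
	return err;
}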
......