Commit b9cc335f authored by Linus Torvalds

Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "A few fixes all over the place:

  radeon is probably the biggest standout: it's a fix for screen
  corruption or hung black outputs, so I thought it was worth pulling in.

  Otherwise some amdgpu power control fixes, some misc vmwgfx fixes, one
  etnaviv fix, one virtio-gpu fix, two DP MST fixes, and a single TTM
  fix"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux:
  drm/vmwgfx: Fix order of operation
  drm/vmwgfx: use vmw_cmd_dx_cid_check for query commands.
  drm/vmwgfx: Enable SVGA_3D_CMD_DX_SET_PREDICATION
  drm/amdgpu: disable vm interrupts with vm_fault_stop=2
  drm/amdgpu: print a message if ATPX dGPU power control is missing
  Revert "drm/amdgpu: disable runtime pm on PX laptops without dGPU power control"
  drm/radeon: fix vertical bars appear on monitor (v2)
  drm/ttm: fix kref count mess in ttm_bo_move_to_lru_tail
  drm/virtio: send vblank event after crtc updates
  drm/dp/mst: Restore primary hub guid on resume
  drm/dp/mst: Get validated port ref in drm_dp_update_payload_part1()
  drm/etnaviv: don't move linear memory window on 3D cores without MC2.0
parents 925d96a0 ea996978
@@ -63,10 +63,6 @@ bool amdgpu_has_atpx(void) {
 	return amdgpu_atpx_priv.atpx_detected;
 }
 
-bool amdgpu_has_atpx_dgpu_power_cntl(void) {
-	return amdgpu_atpx_priv.atpx.functions.power_cntl;
-}
-
 /**
  * amdgpu_atpx_call - call an ATPX method
  *
@@ -146,6 +142,13 @@ static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mas
  */
 static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
 {
+	/* make sure required functions are enabled */
+	/* dGPU power control is required */
+	if (atpx->functions.power_cntl == false) {
+		printk("ATPX dGPU power cntl not present, forcing\n");
+		atpx->functions.power_cntl = true;
+	}
+
 	if (atpx->functions.px_params) {
 		union acpi_object *info;
 		struct atpx_px_params output;
...
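Style note on the hunk above: the bare printk() carries no log level. The usual kernel idioms would be either of the following (equivalent sketch, same message):

	printk(KERN_WARNING "ATPX dGPU power cntl not present, forcing\n");
	/* or, with the pr_fmt()-aware helper: */
	pr_warn("ATPX dGPU power cntl not present, forcing\n");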
@@ -62,12 +62,6 @@ static const char *amdgpu_asic_name[] = {
 	"LAST",
 };
 
-#if defined(CONFIG_VGA_SWITCHEROO)
-bool amdgpu_has_atpx_dgpu_power_cntl(void);
-#else
-static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
-#endif
-
 bool amdgpu_device_is_px(struct drm_device *dev)
 {
 	struct amdgpu_device *adev = dev->dev_private;
@@ -1485,7 +1479,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 
 	if (amdgpu_runtime_pm == 1)
 		runtime = true;
-	if (amdgpu_device_is_px(ddev) && amdgpu_has_atpx_dgpu_power_cntl())
+	if (amdgpu_device_is_px(ddev))
 		runtime = true;
 	vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime);
 	if (runtime)
...
@@ -910,7 +910,10 @@ static int gmc_v7_0_late_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
+		return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+	else
+		return 0;
 }
 
 static int gmc_v7_0_sw_init(void *handle)
...
@@ -870,7 +870,10 @@ static int gmc_v8_0_late_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
+		return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+	else
+		return 0;
 }
 
 #define mmMC_SEQ_MISC0_FIJI 0xA71
...
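For context on the two gmc hunks: with vm_fault_stop=2 the GPU is halted on every VM fault anyway, so the fault interrupt would only add log noise; leaving it disabled is deliberate. A sketch of the module-parameter plumbing these hunks key off (the constants and parameter name follow the amdgpu driver, but treat the details as illustrative):

#include <linux/module.h>

/* fault policies, amdgpu_vm.h style (illustrative copy) */
#define AMDGPU_VM_FAULT_STOP_NEVER	0	/* keep running on faults */
#define AMDGPU_VM_FAULT_STOP_FIRST	1	/* stop after the first fault */
#define AMDGPU_VM_FAULT_STOP_ALWAYS	2	/* stop on every fault */

int amdgpu_vm_fault_stop;	/* set via amdgpu.vm_fault_stop=<n> */
module_param_named(vm_fault_stop, amdgpu_vm_fault_stop, int, 0444);
MODULE_PARM_DESC(vm_fault_stop,
		 "Stop on VM fault (0 = never (default), 1 = print first fault, 2 = always)");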
@@ -1796,6 +1796,11 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
 		req_payload.start_slot = cur_slots;
 		if (mgr->proposed_vcpis[i]) {
 			port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
+			port = drm_dp_get_validated_port_ref(mgr, port);
+			if (!port) {
+				mutex_unlock(&mgr->payload_lock);
+				return -EINVAL;
+			}
 			req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
 			req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi;
 		} else {
@@ -1823,6 +1828,9 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
 			mgr->payloads[i].payload_state = req_payload.payload_state;
 		}
 		cur_slots += req_payload.num_slots;
+
+		if (port)
+			drm_dp_put_port(port);
 	}
 
 	for (i = 0; i < mgr->max_payloads; i++) {
@@ -2128,6 +2136,8 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
 	if (mgr->mst_primary) {
 		int sret;
+		u8 guid[16];
+
 		sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
 		if (sret != DP_RECEIVER_CAP_SIZE) {
 			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
@@ -2142,6 +2152,16 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
 			ret = -1;
 			goto out_unlock;
 		}
+
+		/* Some hubs forget their guids after they resume */
+		sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
+		if (sret != 16) {
+			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
+			ret = -1;
+			goto out_unlock;
+		}
+
+		drm_dp_check_mstb_guid(mgr->mst_primary, guid);
+
 		ret = 0;
 	} else
 		ret = -1;
...
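Both payload hunks follow the MST topology code's validated-reference discipline: a port cached in proposed_vcpis may already have been unplugged, so it must be revalidated (taking a reference) before use and released afterwards. A hedged sketch of that usage pattern, with a made-up caller around the real helpers:

/* example_use_cached_port() is hypothetical; the two helpers are real. */
static int example_use_cached_port(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_mst_port *cached)
{
	struct drm_dp_mst_port *port;

	port = drm_dp_get_validated_port_ref(mgr, cached);
	if (!port)
		return -EINVAL;	/* port vanished, e.g. hub unplugged */

	/* ... port is safe to dereference here ... */

	drm_dp_put_port(port);	/* drop the reference taken above */
	return 0;
}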
@@ -572,6 +572,24 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
 		goto fail;
 	}
 
+	/*
+	 * Set the GPU linear window to be at the end of the DMA window, where
+	 * the CMA area is likely to reside. This ensures that we are able to
+	 * map the command buffers while having the linear window overlap as
+	 * much RAM as possible, so we can optimize mappings for other buffers.
+	 *
+	 * For 3D cores only do this if MC2.0 is present, as with MC1.0 it
+	 * leads to different views of the memory on the individual engines.
+	 */
+	if (!(gpu->identity.features & chipFeatures_PIPE_3D) ||
+	    (gpu->identity.minor_features0 & chipMinorFeatures0_MC20)) {
+		u32 dma_mask = (u32)dma_get_required_mask(gpu->dev);
+		if (dma_mask < PHYS_OFFSET + SZ_2G)
+			gpu->memory_base = PHYS_OFFSET;
+		else
+			gpu->memory_base = dma_mask - SZ_2G + 1;
+	}
+
 	ret = etnaviv_hw_reset(gpu);
 	if (ret)
 		goto fail;
@@ -1566,7 +1584,6 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct etnaviv_gpu *gpu;
-	u32 dma_mask;
 	int err = 0;
 
 	gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
@@ -1576,18 +1593,6 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
 	gpu->dev = &pdev->dev;
 	mutex_init(&gpu->lock);
 
-	/*
-	 * Set the GPU linear window to be at the end of the DMA window, where
-	 * the CMA area is likely to reside. This ensures that we are able to
-	 * map the command buffers while having the linear window overlap as
-	 * much RAM as possible, so we can optimize mappings for other buffers.
-	 */
-	dma_mask = (u32)dma_get_required_mask(dev);
-	if (dma_mask < PHYS_OFFSET + SZ_2G)
-		gpu->memory_base = PHYS_OFFSET;
-	else
-		gpu->memory_base = dma_mask - SZ_2G + 1;
-
 	/* Map registers: */
 	gpu->mmio = etnaviv_ioremap(pdev, NULL, dev_name(gpu->dev));
 	if (IS_ERR(gpu->mmio))
...
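To make the linear-window placement concrete, a worked example under assumed platform values (PHYS_OFFSET and the DMA mask are platform-specific; these numbers are made up):

/* Assume PHYS_OFFSET = 0x10000000 and RAM reachable up to 4 GiB,
 * so dma_get_required_mask() yields 0xffffffff. SZ_2G = 0x80000000. */
static u32 example_linear_window_base(void)
{
	u32 dma_mask = 0xffffffffu;		/* assumed mask */

	if (dma_mask < PHYS_OFFSET + SZ_2G)	/* 0xffffffff >= 0x90000000: false */
		return PHYS_OFFSET;		/* small systems: window at base of RAM */
	return dma_mask - SZ_2G + 1;		/* = 0x80000000: window covers the top 2 GiB */
}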
@@ -2608,10 +2608,152 @@ static void evergreen_agp_enable(struct radeon_device *rdev)
 	WREG32(VM_CONTEXT1_CNTL, 0);
 }
 
+static const unsigned ni_dig_offsets[] =
+{
+	NI_DIG0_REGISTER_OFFSET,
+	NI_DIG1_REGISTER_OFFSET,
+	NI_DIG2_REGISTER_OFFSET,
+	NI_DIG3_REGISTER_OFFSET,
+	NI_DIG4_REGISTER_OFFSET,
+	NI_DIG5_REGISTER_OFFSET
+};
+
+static const unsigned ni_tx_offsets[] =
+{
+	NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1,
+	NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1,
+	NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1,
+	NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1,
+	NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1,
+	NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1
+};
+
+static const unsigned evergreen_dp_offsets[] =
+{
+	EVERGREEN_DP0_REGISTER_OFFSET,
+	EVERGREEN_DP1_REGISTER_OFFSET,
+	EVERGREEN_DP2_REGISTER_OFFSET,
+	EVERGREEN_DP3_REGISTER_OFFSET,
+	EVERGREEN_DP4_REGISTER_OFFSET,
+	EVERGREEN_DP5_REGISTER_OFFSET
+};
+
+/*
+ * Assumes EVERGREEN_CRTC_MASTER_EN is enabled for the requested crtc.
+ * Going from crtc to connector is not reliable, since the mapping really
+ * runs in the opposite direction. If the crtc is enabled, find the dig_fe
+ * that selects this crtc and ensure it is enabled. If such a dig_fe is
+ * found, find the dig_be that selects that dig_fe and ensure it is
+ * enabled and in DP_SST mode. If UNIPHY_PLL_CONTROL1 is enabled, the
+ * timing must be disconnected from the dp symbol clocks.
+ */
+static bool evergreen_is_dp_sst_stream_enabled(struct radeon_device *rdev,
+					       unsigned crtc_id, unsigned *ret_dig_fe)
+{
+	unsigned i;
+	unsigned dig_fe;
+	unsigned dig_be;
+	unsigned dig_en_be;
+	unsigned uniphy_pll;
+	unsigned digs_fe_selected;
+	unsigned dig_be_mode;
+	unsigned dig_fe_mask;
+	bool is_enabled = false;
+	bool found_crtc = false;
+
+	/* loop through all running dig_fes to find the one selecting this crtc */
+	for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
+		dig_fe = RREG32(NI_DIG_FE_CNTL + ni_dig_offsets[i]);
+		if (dig_fe & NI_DIG_FE_CNTL_SYMCLK_FE_ON &&
+		    crtc_id == NI_DIG_FE_CNTL_SOURCE_SELECT(dig_fe)) {
+			/* found a running pipe */
+			found_crtc = true;
+			dig_fe_mask = 1 << i;
+			dig_fe = i;
+			break;
+		}
+	}
+
+	if (found_crtc) {
+		/* loop through all running dig_bes to find the one selecting this dig_fe */
+		for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
+			dig_be = RREG32(NI_DIG_BE_CNTL + ni_dig_offsets[i]);
+			/* is our dig_fe selected by this dig_be? */
+			digs_fe_selected = NI_DIG_BE_CNTL_FE_SOURCE_SELECT(dig_be);
+			dig_be_mode = NI_DIG_FE_CNTL_MODE(dig_be);
+			if (dig_fe_mask & digs_fe_selected &&
+			    /* is the dig_be in sst mode? */
+			    dig_be_mode == NI_DIG_BE_DPSST) {
+				dig_en_be = RREG32(NI_DIG_BE_EN_CNTL +
+						   ni_dig_offsets[i]);
+				uniphy_pll = RREG32(NI_DCIO_UNIPHY0_PLL_CONTROL1 +
+						    ni_tx_offsets[i]);
+				/* dig_be is enabled and the tx is running */
+				if (dig_en_be & NI_DIG_BE_EN_CNTL_ENABLE &&
+				    dig_en_be & NI_DIG_BE_EN_CNTL_SYMBCLK_ON &&
+				    uniphy_pll & NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE) {
+					is_enabled = true;
+					*ret_dig_fe = dig_fe;
+					break;
+				}
+			}
+		}
+	}
+
+	return is_enabled;
+}
+
+/*
+ * Blank the dig when in dp sst mode; the dig ignores crtc timing.
+ */
+static void evergreen_blank_dp_output(struct radeon_device *rdev,
+				      unsigned dig_fe)
+{
+	unsigned stream_ctrl;
+	unsigned fifo_ctrl;
+	unsigned counter = 0;
+
+	if (dig_fe >= ARRAY_SIZE(evergreen_dp_offsets)) {
+		DRM_ERROR("invalid dig_fe %d\n", dig_fe);
+		return;
+	}
+
+	stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
+			     evergreen_dp_offsets[dig_fe]);
+	if (!(stream_ctrl & EVERGREEN_DP_VID_STREAM_CNTL_ENABLE)) {
+		DRM_ERROR("dig %d should be enabled\n", dig_fe);
+		return;
+	}
+
+	stream_ctrl &= ~EVERGREEN_DP_VID_STREAM_CNTL_ENABLE;
+	WREG32(EVERGREEN_DP_VID_STREAM_CNTL +
+	       evergreen_dp_offsets[dig_fe], stream_ctrl);
+
+	stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
+			     evergreen_dp_offsets[dig_fe]);
+	while (counter < 32 && stream_ctrl & EVERGREEN_DP_VID_STREAM_STATUS) {
+		msleep(1);
+		counter++;
+		stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
+				     evergreen_dp_offsets[dig_fe]);
+	}
+	if (counter >= 32)
+		DRM_ERROR("timed out after %d ms waiting for DP stream to stop\n", counter);
+
+	fifo_ctrl = RREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe]);
+	fifo_ctrl |= EVERGREEN_DP_STEER_FIFO_RESET;
+	WREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe], fifo_ctrl);
+}
+
 void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
 {
 	u32 crtc_enabled, tmp, frame_count, blackout;
 	int i, j;
+	unsigned dig_fe;
 
 	if (!ASIC_IS_NODCE(rdev)) {
 		save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
@@ -2651,7 +2793,17 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_sav
 				break;
 			udelay(1);
 		}
+
+		/*
+		 * We should disable the dig if it drives a dp sst stream,
+		 * but we are in radeon_device_init and the topology is not
+		 * known yet; it only becomes available after
+		 * radeon_modeset_init. radeon_atom_encoder_dpms_dig would
+		 * do the job if it were initialized properly, so for now
+		 * do it manually here.
+		 */
+		if (ASIC_IS_DCE5(rdev) &&
+		    evergreen_is_dp_sst_stream_enabled(rdev, i, &dig_fe))
+			evergreen_blank_dp_output(rdev, dig_fe);
+		/* the six lines below could then be removed */
 		/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
 		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
 		tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
...
@@ -250,8 +250,43 @@
 /* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */
 #define EVERGREEN_HDMI_BASE				0x7030
 
+/* DIG block */
+#define NI_DIG0_REGISTER_OFFSET			(0x7000  - 0x7000)
+#define NI_DIG1_REGISTER_OFFSET			(0x7C00  - 0x7000)
+#define NI_DIG2_REGISTER_OFFSET			(0x10800 - 0x7000)
+#define NI_DIG3_REGISTER_OFFSET			(0x11400 - 0x7000)
+#define NI_DIG4_REGISTER_OFFSET			(0x12000 - 0x7000)
+#define NI_DIG5_REGISTER_OFFSET			(0x12C00 - 0x7000)
+
+#define NI_DIG_FE_CNTL					0x7000
+# define NI_DIG_FE_CNTL_SOURCE_SELECT(x)		((x) & 0x3)
+# define NI_DIG_FE_CNTL_SYMCLK_FE_ON			(1 << 24)
+
+#define NI_DIG_BE_CNTL					0x7140
+# define NI_DIG_BE_CNTL_FE_SOURCE_SELECT(x)		(((x) >> 8) & 0x3F)
+# define NI_DIG_FE_CNTL_MODE(x)				(((x) >> 16) & 0x7)
+
+#define NI_DIG_BE_EN_CNTL				0x7144
+# define NI_DIG_BE_EN_CNTL_ENABLE			(1 << 0)
+# define NI_DIG_BE_EN_CNTL_SYMBCLK_ON			(1 << 8)
+# define NI_DIG_BE_DPSST				0
+
 /* Display Port block */
+#define EVERGREEN_DP0_REGISTER_OFFSET			(0x730C  - 0x730C)
+#define EVERGREEN_DP1_REGISTER_OFFSET			(0x7F0C  - 0x730C)
+#define EVERGREEN_DP2_REGISTER_OFFSET			(0x10B0C - 0x730C)
+#define EVERGREEN_DP3_REGISTER_OFFSET			(0x1170C - 0x730C)
+#define EVERGREEN_DP4_REGISTER_OFFSET			(0x1230C - 0x730C)
+#define EVERGREEN_DP5_REGISTER_OFFSET			(0x12F0C - 0x730C)
+
+#define EVERGREEN_DP_VID_STREAM_CNTL			0x730C
+# define EVERGREEN_DP_VID_STREAM_CNTL_ENABLE		(1 << 0)
+# define EVERGREEN_DP_VID_STREAM_STATUS			(1 << 16)
+#define EVERGREEN_DP_STEER_FIFO				0x7310
+# define EVERGREEN_DP_STEER_FIFO_RESET			(1 << 0)
+
 #define EVERGREEN_DP_SEC_CNTL				0x7280
 # define EVERGREEN_DP_SEC_STREAM_ENABLE			(1 << 0)
 # define EVERGREEN_DP_SEC_ASP_ENABLE			(1 << 4)
@@ -266,4 +301,15 @@
 # define EVERGREEN_DP_SEC_N_BASE_MULTIPLE(x)		(((x) & 0xf) << 24)
 # define EVERGREEN_DP_SEC_SS_EN				(1 << 28)
 
+/* DCIO_UNIPHY block */
+#define NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1		(0x6600 - 0x6600)
+#define NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1		(0x6640 - 0x6600)
+#define NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1		(0x6680 - 0x6600)
+#define NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1		(0x66C0 - 0x6600)
+#define NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1		(0x6700 - 0x6600)
+#define NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1		(0x6740 - 0x6600)
+
+#define NI_DCIO_UNIPHY0_PLL_CONTROL1			0x6618
+# define NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE		(1 << 0)
+
 #endif
@@ -230,22 +230,13 @@ EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
 
 void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
 {
-	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_mem_type_manager *man;
+	int put_count = 0;
 
 	lockdep_assert_held(&bo->resv->lock.base);
 
-	if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
-		list_del_init(&bo->swap);
-		list_del_init(&bo->lru);
-
-	} else {
-		if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG))
-			list_move_tail(&bo->swap, &bo->glob->swap_lru);
-
-		man = &bdev->man[bo->mem.mem_type];
-		list_move_tail(&bo->lru, &man->lru);
-	}
+	put_count = ttm_bo_del_from_lru(bo);
+	ttm_bo_list_ref_sub(bo, put_count, true);
+	ttm_bo_add_to_lru(bo);
 }
 EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
...
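The point of the TTM fix: moving a buffer between LRU lists by hand skipped the reference counts those lists hold, so list membership and the kref drifted apart. The helper triple makes the pairing explicit. A sketch of the general pattern (hypothetical object; in TTM the count comes from ttm_bo_del_from_lru() and is dropped via ttm_bo_list_ref_sub()):

#include <linux/kref.h>
#include <linux/list.h>

struct obj {				/* stand-in for a ttm_buffer_object */
	struct kref kref;
	struct list_head lru;
};

static void obj_release(struct kref *kref);	/* hypothetical destructor */

static void move_to_lru_tail(struct obj *o, struct list_head *lru)
{
	int put_count = 0;

	if (!list_empty(&o->lru)) {
		list_del_init(&o->lru);	/* leave the list ... */
		put_count++;		/* ... and remember the ref it held */
	}

	kref_get(&o->kref);		/* the list we join takes a fresh ref */
	list_add_tail(&o->lru, lru);

	while (put_count--)		/* only now drop the old references */
		kref_put(&o->kref, obj_release);
}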
@@ -267,11 +267,23 @@ static int virtio_gpu_crtc_atomic_check(struct drm_crtc *crtc,
 	return 0;
 }
 
+static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
+					 struct drm_crtc_state *old_state)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&crtc->dev->event_lock, flags);
+	if (crtc->state->event)
+		drm_crtc_send_vblank_event(crtc, crtc->state->event);
+	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+}
+
 static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = {
 	.enable = virtio_gpu_crtc_enable,
 	.disable = virtio_gpu_crtc_disable,
 	.mode_set_nofb = virtio_gpu_crtc_mode_set_nofb,
 	.atomic_check = virtio_gpu_crtc_atomic_check,
+	.atomic_flush = virtio_gpu_crtc_atomic_flush,
 };
 
 static void virtio_gpu_enc_mode_set(struct drm_encoder *encoder,
...
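Why this matters: virtio-gpu has no real display hardware generating vblanks, so unless the driver completes the pending crtc event itself from atomic_flush, userspace waits forever. The userspace side, for context, is the standard libdrm flip loop; a hedged sketch (error handling trimmed, handler body is a placeholder):

#include <poll.h>
#include <xf86drm.h>

static void page_flip_handler(int fd, unsigned int frame,
			      unsigned int sec, unsigned int usec,
			      void *data)
{
	/* the old framebuffer may be reused from here on */
}

static void wait_for_flip(int fd)	/* fd: an open DRM device */
{
	drmEventContext evctx = {
		.version = 2,
		.page_flip_handler = page_flip_handler,
	};
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	poll(&pfd, 1, -1);		/* wakes when the vblank event arrives */
	drmHandleEvent(fd, &evctx);	/* dispatches to page_flip_handler */
}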
@@ -3293,19 +3293,19 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
 		    &vmw_cmd_dx_cid_check, true, false, true),
 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
 		    true, false, true),
-	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_ok,
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
 		    true, false, true),
 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
 		    true, false, true),
 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
-		    &vmw_cmd_ok, true, false, true),
-	VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_ok,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
 		    true, false, true),
-	VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_ok,
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
 		    true, false, true),
 	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
 		    true, false, true),
-	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_invalid,
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
 		    true, false, true),
 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
 		    true, false, true),
...
@@ -573,9 +573,9 @@ static int vmw_fb_set_par(struct fb_info *info)
 		mode = old_mode;
 		old_mode = NULL;
 	} else if (!vmw_kms_validate_mode_vram(vmw_priv,
 				mode->hdisplay *
-				(var->bits_per_pixel + 7) / 8,
+				DIV_ROUND_UP(var->bits_per_pixel, 8),
 				mode->vdisplay)) {
 		drm_mode_destroy(vmw_priv->dev, mode);
 		return -EINVAL;
 	}
...
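Why this is the "order of operation" fix: C evaluates a * b / c left to right, so hdisplay * (bits_per_pixel + 7) / 8 divides the whole product rather than rounding bits up to bytes per pixel first. DIV_ROUND_UP (the kernel defines it as (((n) + (d) - 1) / (d))) forces the intended grouping. A worked example with assumed values:

/* hdisplay = 1366, bits_per_pixel = 15 (an awkward but legal depth) */
int old_pitch = 1366 * (15 + 7) / 8;		/* (1366 * 22) / 8 = 3756 */
int new_pitch = 1366 * DIV_ROUND_UP(15, 8);	/* 1366 * 2       = 2732 */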