Commit a38fd7d8 authored by Greg Kroah-Hartman

Merge tag 'drm-fixes-2018-09-21' of git://anongit.freedesktop.org/drm/drm

David writes:
  "drm fixes for 4.19-rc5:

   - core: fix debugfs for atomic, fix the check for atomic for
     non-modesetting drivers
   - amdgpu: adds a new PCI id, some kfd fixes and an sdma fix
   - i915: a bunch of GVT fixes.
   - vc4: scaling fix
   - vmwgfx: modesetting fixes and an old buffer eviction fix
   - udl: framebuffer destruction fix
   - sun4i: disable R40 display support until the next kernel
   - pl111: NULL termination on table fix"

* tag 'drm-fixes-2018-09-21' of git://anongit.freedesktop.org/drm/drm: (21 commits)
  drm/amdkfd: Fix ATS capablity was not reported correctly on some APUs
  drm/amdkfd: Change the control stack MTYPE from UC to NC on GFX9
  drm/amdgpu: Fix SDMA HQD destroy error on gfx_v7
  drm/vmwgfx: Fix buffer object eviction
  drm/vmwgfx: Don't impose STDU limits on framebuffer size
  drm/vmwgfx: limit mode size for all display unit to texture_max
  drm/vmwgfx: limit screen size to stdu_max during check_modeset
  drm/vmwgfx: don't check for old_crtc_state enable status
  drm/amdgpu: add new polaris pci id
  drm: sun4i: drop second PLL from A64 HDMI PHY
  drm: fix drm_drv_uses_atomic_modeset on non modesetting drivers.
  drm/i915/gvt: clear ggtt entries when destroy vgpu
  drm/i915/gvt: request srcu_read_lock before checking if one gfn is valid
  drm/i915/gvt: Add GEN9_CLKGATE_DIS_4 to default BXT mmio handler
  drm/i915/gvt: Init PHY related registers for BXT
  drm/atomic: Use drm_drv_uses_atomic_modeset() for debugfs creation
  drm/fb-helper: Remove set but not used variable 'connector_funcs'
  drm: udl: Destroy framebuffer only if it was initialized
  drm/sun4i: Remove R40 display pipeline compatibles
  drm/pl111: Make sure of_device_id tables are NULL terminated
  ...
parents 234b69e3 4fcb7f8b
@@ -272,7 +272,7 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
 int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
             void **mem_obj, uint64_t *gpu_addr,
-            void **cpu_ptr)
+            void **cpu_ptr, bool mqd_gfx9)
 {
     struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
     struct amdgpu_bo *bo = NULL;
@@ -287,6 +287,10 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
     bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
     bp.type = ttm_bo_type_kernel;
     bp.resv = NULL;
+    if (mqd_gfx9)
+        bp.flags |= AMDGPU_GEM_CREATE_MQD_GFX9;
     r = amdgpu_bo_create(adev, &bp, &bo);
     if (r) {
         dev_err(adev->dev,
...
@@ -136,7 +136,7 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd);
 /* Shared API */
 int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
             void **mem_obj, uint64_t *gpu_addr,
-            void **cpu_ptr);
+            void **cpu_ptr, bool mqd_gfx9);
 void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);
 void get_local_mem_info(struct kgd_dev *kgd,
             struct kfd_local_mem_info *mem_info);
...
@@ -685,7 +685,7 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
     while (true) {
         temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
-        if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT)
+        if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
             break;
         if (time_after(jiffies, end_jiffies))
             return -ETIME;
...
@@ -367,12 +367,14 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
         break;
     case CHIP_POLARIS10:
         if (type == CGS_UCODE_ID_SMU) {
-            if ((adev->pdev->device == 0x67df) &&
+            if (((adev->pdev->device == 0x67df) &&
                 ((adev->pdev->revision == 0xe0) ||
                  (adev->pdev->revision == 0xe3) ||
                  (adev->pdev->revision == 0xe4) ||
                  (adev->pdev->revision == 0xe5) ||
                  (adev->pdev->revision == 0xe7) ||
+                 (adev->pdev->revision == 0xef))) ||
+                ((adev->pdev->device == 0x6fdf) &&
                  (adev->pdev->revision == 0xef))) {
                 info->is_kicker = true;
                 strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
...
@@ -740,6 +740,7 @@ static const struct pci_device_id pciidlist[] = {
     {0x1002, 0x67CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
     {0x1002, 0x67CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
     {0x1002, 0x67CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+    {0x1002, 0x6FDF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
     /* Polaris12 */
     {0x1002, 0x6980, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
     {0x1002, 0x6981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
...
@@ -457,7 +457,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
     if (kfd->kfd2kgd->init_gtt_mem_allocation(
             kfd->kgd, size, &kfd->gtt_mem,
-            &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr)){
+            &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr,
+            false)) {
         dev_err(kfd_device, "Could not allocate %d bytes\n", size);
         goto out;
     }
...
@@ -62,9 +62,20 @@ int kfd_iommu_device_init(struct kfd_dev *kfd)
     struct amd_iommu_device_info iommu_info;
     unsigned int pasid_limit;
     int err;
+    struct kfd_topology_device *top_dev;
-    if (!kfd->device_info->needs_iommu_device)
+    top_dev = kfd_topology_device_by_id(kfd->id);
+    /*
+     * Overwrite ATS capability according to needs_iommu_device to fix
+     * potential missing corresponding bit in CRAT of BIOS.
+     */
+    if (!kfd->device_info->needs_iommu_device) {
+        top_dev->node_props.capability &= ~HSA_CAP_ATS_PRESENT;
         return 0;
+    }
+    top_dev->node_props.capability |= HSA_CAP_ATS_PRESENT;
     iommu_info.flags = 0;
     err = amd_iommu_device_info(kfd->pdev, &iommu_info);
...
@@ -88,7 +88,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
             ALIGN(sizeof(struct v9_mqd), PAGE_SIZE),
             &((*mqd_mem_obj)->gtt_mem),
             &((*mqd_mem_obj)->gpu_addr),
-            (void *)&((*mqd_mem_obj)->cpu_ptr));
+            (void *)&((*mqd_mem_obj)->cpu_ptr), true);
     } else
         retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct v9_mqd),
             mqd_mem_obj);
...
@@ -806,6 +806,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu);
 int kfd_topology_remove_device(struct kfd_dev *gpu);
 struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
         uint32_t proximity_domain);
+struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id);
 struct kfd_dev *kfd_device_by_id(uint32_t gpu_id);
 struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
 int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev);
...
@@ -63,22 +63,33 @@ struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
     return device;
 }
-struct kfd_dev *kfd_device_by_id(uint32_t gpu_id)
+struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id)
 {
-    struct kfd_topology_device *top_dev;
-    struct kfd_dev *device = NULL;
+    struct kfd_topology_device *top_dev = NULL;
+    struct kfd_topology_device *ret = NULL;
     down_read(&topology_lock);
     list_for_each_entry(top_dev, &topology_device_list, list)
         if (top_dev->gpu_id == gpu_id) {
-            device = top_dev->gpu;
+            ret = top_dev;
             break;
         }
     up_read(&topology_lock);
-    return device;
+    return ret;
+}
+
+struct kfd_dev *kfd_device_by_id(uint32_t gpu_id)
+{
+    struct kfd_topology_device *top_dev;
+
+    top_dev = kfd_topology_device_by_id(gpu_id);
+    if (!top_dev)
+        return NULL;
+
+    return top_dev->gpu;
 }
 struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev)
...
@@ -292,7 +292,7 @@ struct tile_config {
 struct kfd2kgd_calls {
     int (*init_gtt_mem_allocation)(struct kgd_dev *kgd, size_t size,
                     void **mem_obj, uint64_t *gpu_addr,
-                    void **cpu_ptr);
+                    void **cpu_ptr, bool mqd_gfx9);
     void (*free_gtt_mem)(struct kgd_dev *kgd, void *mem_obj);
...
@@ -2067,7 +2067,7 @@ static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p,
     struct drm_connector *connector;
     struct drm_connector_list_iter conn_iter;
-    if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
+    if (!drm_drv_uses_atomic_modeset(dev))
         return;
     list_for_each_entry(plane, &config->plane_list, head) {
...
@@ -151,7 +151,7 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
         return ret;
     }
-    if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
+    if (drm_drv_uses_atomic_modeset(dev)) {
         ret = drm_atomic_debugfs_init(minor);
         if (ret) {
             DRM_ERROR("Failed to create atomic debugfs files\n");
...
@@ -2370,7 +2370,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
 {
     int c, o;
     struct drm_connector *connector;
-    const struct drm_connector_helper_funcs *connector_funcs;
     int my_score, best_score, score;
     struct drm_fb_helper_crtc **crtcs, *crtc;
     struct drm_fb_helper_connector *fb_helper_conn;
@@ -2399,8 +2398,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
     if (drm_has_preferred_mode(fb_helper_conn, width, height))
         my_score++;
-    connector_funcs = connector->helper_private;
     /*
      * select a crtc for this connector and then attempt to configure
      * remaining connectors
...
@@ -3210,6 +3210,7 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt)
     MMIO_D(BXT_DSI_PLL_ENABLE, D_BXT);
     MMIO_D(GEN9_CLKGATE_DIS_0, D_BXT);
+    MMIO_D(GEN9_CLKGATE_DIS_4, D_BXT);
     MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_A), D_BXT);
     MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT);
...
@@ -1833,6 +1833,8 @@ static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
 {
     struct kvmgt_guest_info *info;
     struct kvm *kvm;
+    int idx;
+    bool ret;
     if (!handle_valid(handle))
         return false;
@@ -1840,8 +1842,11 @@ static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
     info = (struct kvmgt_guest_info *)handle;
     kvm = info->kvm;
-    return kvm_is_visible_gfn(kvm, gfn);
+    idx = srcu_read_lock(&kvm->srcu);
+    ret = kvm_is_visible_gfn(kvm, gfn);
+    srcu_read_unlock(&kvm->srcu, idx);
+    return ret;
 }
 struct intel_gvt_mpt kvmgt_mpt = {
...
@@ -244,6 +244,34 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
         /* set the bit 0:2(Core C-State ) to C0 */
         vgpu_vreg_t(vgpu, GEN6_GT_CORE_STATUS) = 0;
+        if (IS_BROXTON(vgpu->gvt->dev_priv)) {
+            vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &=
+                    ~(BIT(0) | BIT(1));
+            vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
+                    ~PHY_POWER_GOOD;
+            vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &=
+                    ~PHY_POWER_GOOD;
+            vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY0)) &=
+                    ~BIT(30);
+            vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY1)) &=
+                    ~BIT(30);
+            vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) &=
+                    ~BXT_PHY_LANE_ENABLED;
+            vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) |=
+                    BXT_PHY_CMNLANE_POWERDOWN_ACK |
+                    BXT_PHY_LANE_POWERDOWN_ACK;
+            vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) &=
+                    ~BXT_PHY_LANE_ENABLED;
+            vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) |=
+                    BXT_PHY_CMNLANE_POWERDOWN_ACK |
+                    BXT_PHY_LANE_POWERDOWN_ACK;
+            vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) &=
+                    ~BXT_PHY_LANE_ENABLED;
+            vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) |=
+                    BXT_PHY_CMNLANE_POWERDOWN_ACK |
+                    BXT_PHY_LANE_POWERDOWN_ACK;
+        }
     } else {
 #define GVT_GEN8_MMIO_RESET_OFFSET (0x44200)
         /* only reset the engine related, so starting with 0x44200
...
@@ -281,6 +281,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
     intel_vgpu_clean_submission(vgpu);
     intel_vgpu_clean_display(vgpu);
     intel_vgpu_clean_opregion(vgpu);
+    intel_vgpu_reset_ggtt(vgpu, true);
     intel_vgpu_clean_gtt(vgpu);
     intel_gvt_hypervisor_detach_vgpu(vgpu);
     intel_vgpu_free_resource(vgpu);
...
@@ -111,7 +111,8 @@ static int vexpress_muxfpga_probe(struct platform_device *pdev)
 }
 static const struct of_device_id vexpress_muxfpga_match[] = {
-    { .compatible = "arm,vexpress-muxfpga", }
+    { .compatible = "arm,vexpress-muxfpga", },
+    {}
 };
 static struct platform_driver vexpress_muxfpga_driver = {
...
@@ -418,7 +418,6 @@ static const struct of_device_id sun4i_drv_of_table[] = {
     { .compatible = "allwinner,sun8i-a33-display-engine" },
     { .compatible = "allwinner,sun8i-a83t-display-engine" },
     { .compatible = "allwinner,sun8i-h3-display-engine" },
-    { .compatible = "allwinner,sun8i-r40-display-engine" },
     { .compatible = "allwinner,sun8i-v3s-display-engine" },
     { .compatible = "allwinner,sun9i-a80-display-engine" },
     { }
...
@@ -398,7 +398,6 @@ static struct regmap_config sun8i_hdmi_phy_regmap_config = {
 static const struct sun8i_hdmi_phy_variant sun50i_a64_hdmi_phy = {
     .has_phy_clk = true,
-    .has_second_pll = true,
     .phy_init = &sun8i_hdmi_phy_init_h3,
     .phy_disable = &sun8i_hdmi_phy_disable_h3,
     .phy_config = &sun8i_hdmi_phy_config_h3,
...
@@ -545,22 +545,6 @@ static const struct sun8i_mixer_cfg sun8i_h3_mixer0_cfg = {
     .vi_num = 1,
 };
-static const struct sun8i_mixer_cfg sun8i_r40_mixer0_cfg = {
-    .ccsc = 0,
-    .mod_rate = 297000000,
-    .scaler_mask = 0xf,
-    .ui_num = 3,
-    .vi_num = 1,
-};
-static const struct sun8i_mixer_cfg sun8i_r40_mixer1_cfg = {
-    .ccsc = 1,
-    .mod_rate = 297000000,
-    .scaler_mask = 0x3,
-    .ui_num = 1,
-    .vi_num = 1,
-};
 static const struct sun8i_mixer_cfg sun8i_v3s_mixer_cfg = {
     .vi_num = 2,
     .ui_num = 1,
@@ -582,14 +566,6 @@ static const struct of_device_id sun8i_mixer_of_table[] = {
         .compatible = "allwinner,sun8i-h3-de2-mixer-0",
         .data = &sun8i_h3_mixer0_cfg,
     },
-    {
-        .compatible = "allwinner,sun8i-r40-de2-mixer-0",
-        .data = &sun8i_r40_mixer0_cfg,
-    },
-    {
-        .compatible = "allwinner,sun8i-r40-de2-mixer-1",
-        .data = &sun8i_r40_mixer1_cfg,
-    },
     {
         .compatible = "allwinner,sun8i-v3s-de2-mixer",
         .data = &sun8i_v3s_mixer_cfg,
...
@@ -253,7 +253,6 @@ static int sun8i_tcon_top_remove(struct platform_device *pdev)
 /* sun4i_drv uses this list to check if a device node is a TCON TOP */
 const struct of_device_id sun8i_tcon_top_of_table[] = {
-    { .compatible = "allwinner,sun8i-r40-tcon-top" },
     { /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, sun8i_tcon_top_of_table);
...
@@ -432,9 +432,11 @@ static void udl_fbdev_destroy(struct drm_device *dev,
 {
     drm_fb_helper_unregister_fbi(&ufbdev->helper);
     drm_fb_helper_fini(&ufbdev->helper);
+    if (ufbdev->ufb.obj) {
         drm_framebuffer_unregister_private(&ufbdev->ufb.base);
         drm_framebuffer_cleanup(&ufbdev->ufb.base);
         drm_gem_object_put_unlocked(&ufbdev->ufb.obj->base);
+    }
 }
 int udl_fbdev_init(struct drm_device *dev)
...
@@ -297,6 +297,9 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
     vc4_state->y_scaling[0] = vc4_get_scaling_mode(vc4_state->src_h[0],
                            vc4_state->crtc_h);
+    vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE &&
+                   vc4_state->y_scaling[0] == VC4_SCALING_NONE);
     if (num_planes > 1) {
         vc4_state->is_yuv = true;
@@ -312,24 +315,17 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
                 vc4_get_scaling_mode(vc4_state->src_h[1],
                              vc4_state->crtc_h);
-        /* YUV conversion requires that scaling be enabled,
-         * even on a plane that's otherwise 1:1. Choose TPZ
-         * for simplicity.
+        /* YUV conversion requires that horizontal scaling be enabled,
+         * even on a plane that's otherwise 1:1. Looks like only PPF
+         * works in that case, so let's pick that one.
          */
-        if (vc4_state->x_scaling[0] == VC4_SCALING_NONE)
-            vc4_state->x_scaling[0] = VC4_SCALING_TPZ;
-        if (vc4_state->y_scaling[0] == VC4_SCALING_NONE)
-            vc4_state->y_scaling[0] = VC4_SCALING_TPZ;
+        if (vc4_state->is_unity)
+            vc4_state->x_scaling[0] = VC4_SCALING_PPF;
     } else {
         vc4_state->x_scaling[1] = VC4_SCALING_NONE;
         vc4_state->y_scaling[1] = VC4_SCALING_NONE;
     }
-    vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE &&
-                   vc4_state->y_scaling[0] == VC4_SCALING_NONE &&
-                   vc4_state->x_scaling[1] == VC4_SCALING_NONE &&
-                   vc4_state->y_scaling[1] == VC4_SCALING_NONE);
     /* No configuring scaling on the cursor plane, since it gets
        non-vblank-synced updates, and scaling requires requires
        LBM changes which have to be vblank-synced.
@@ -672,7 +668,10 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
         vc4_dlist_write(vc4_state, SCALER_CSC2_ITR_R_601_5);
     }
-    if (!vc4_state->is_unity) {
+    if (vc4_state->x_scaling[0] != VC4_SCALING_NONE ||
+        vc4_state->x_scaling[1] != VC4_SCALING_NONE ||
+        vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
+        vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
         /* LBM Base Address. */
         if (vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
             vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
...
@@ -3729,7 +3729,7 @@ int vmw_validate_single_buffer(struct vmw_private *dev_priv,
 {
     struct vmw_buffer_object *vbo =
         container_of(bo, struct vmw_buffer_object, base);
-    struct ttm_operation_ctx ctx = { interruptible, true };
+    struct ttm_operation_ctx ctx = { interruptible, false };
     int ret;
     if (vbo->pin_count > 0)
...
@@ -1512,21 +1512,19 @@ static int vmw_kms_check_display_memory(struct drm_device *dev,
                        struct drm_rect *rects)
 {
     struct vmw_private *dev_priv = vmw_priv(dev);
-    struct drm_mode_config *mode_config = &dev->mode_config;
     struct drm_rect bounding_box = {0};
     u64 total_pixels = 0, pixel_mem, bb_mem;
     int i;
     for (i = 0; i < num_rects; i++) {
         /*
-         * Currently this check is limiting the topology within max
-         * texture/screentarget size. This should change in future when
-         * user-space support multiple fb with topology.
+         * For STDU only individual screen (screen target) is limited by
+         * SCREENTARGET_MAX_WIDTH/HEIGHT registers.
          */
-        if (rects[i].x1 < 0 || rects[i].y1 < 0 ||
-            rects[i].x2 > mode_config->max_width ||
-            rects[i].y2 > mode_config->max_height) {
-            DRM_ERROR("Invalid GUI layout.\n");
+        if (dev_priv->active_display_unit == vmw_du_screen_target &&
+            (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width ||
+             drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) {
+            DRM_ERROR("Screen size not supported.\n");
             return -EINVAL;
         }
@@ -1615,7 +1613,7 @@ static int vmw_kms_check_topology(struct drm_device *dev,
     struct drm_connector_state *conn_state;
     struct vmw_connector_state *vmw_conn_state;
-    if (!new_crtc_state->enable && old_crtc_state->enable) {
+    if (!new_crtc_state->enable) {
         rects[i].x1 = 0;
         rects[i].y1 = 0;
         rects[i].x2 = 0;
@@ -2216,12 +2214,16 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
     if (dev_priv->assume_16bpp)
         assumed_bpp = 2;
-    if (dev_priv->active_display_unit == vmw_du_screen_target) {
-        max_width = min(max_width, dev_priv->stdu_max_width);
     max_width = min(max_width, dev_priv->texture_max_width);
+    max_height = min(max_height, dev_priv->texture_max_height);
+    /*
+     * For STDU extra limit for a mode on SVGA_REG_SCREENTARGET_MAX_WIDTH/
+     * HEIGHT registers.
+     */
+    if (dev_priv->active_display_unit == vmw_du_screen_target) {
+        max_width = min(max_width, dev_priv->stdu_max_width);
         max_height = min(max_height, dev_priv->stdu_max_height);
-        max_height = min(max_height, dev_priv->texture_max_height);
     }
     /* Add preferred mode */
@@ -2376,6 +2378,7 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
 {
     struct vmw_private *dev_priv = vmw_priv(dev);
+    struct drm_mode_config *mode_config = &dev->mode_config;
     struct drm_vmw_update_layout_arg *arg =
         (struct drm_vmw_update_layout_arg *)data;
     void __user *user_rects;
@@ -2421,6 +2424,21 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
         drm_rects[i].y1 = curr_rect.y;
         drm_rects[i].x2 = curr_rect.x + curr_rect.w;
         drm_rects[i].y2 = curr_rect.y + curr_rect.h;
+        /*
+         * Currently this check is limiting the topology within
+         * mode_config->max (which actually is max texture size
+         * supported by virtual device). This limit is here to address
+         * window managers that create a big framebuffer for whole
+         * topology.
+         */
+        if (drm_rects[i].x1 < 0 || drm_rects[i].y1 < 0 ||
+            drm_rects[i].x2 > mode_config->max_width ||
+            drm_rects[i].y2 > mode_config->max_height) {
+            DRM_ERROR("Invalid GUI layout.\n");
+            ret = -EINVAL;
+            goto out_free;
+        }
     }
     ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);
...
@@ -1600,31 +1600,6 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
     dev_priv->active_display_unit = vmw_du_screen_target;
-    if (dev_priv->capabilities & SVGA_CAP_3D) {
-        /*
-         * For 3D VMs, display (scanout) buffer size is the smaller of
-         * max texture and max STDU
-         */
-        uint32_t max_width, max_height;
-        max_width = min(dev_priv->texture_max_width,
-                dev_priv->stdu_max_width);
-        max_height = min(dev_priv->texture_max_height,
-                 dev_priv->stdu_max_height);
-        dev->mode_config.max_width = max_width;
-        dev->mode_config.max_height = max_height;
-    } else {
-        /*
-         * Given various display aspect ratios, there's no way to
-         * estimate these using prim_bb_mem. So just set these to
-         * something arbitrarily large and we will reject any layout
-         * that doesn't fit prim_bb_mem later
-         */
-        dev->mode_config.max_width = 8192;
-        dev->mode_config.max_height = 8192;
-    }
     vmw_kms_create_implicit_placement_property(dev_priv, false);
     for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i) {
...
@@ -1404,22 +1404,17 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
     *srf_out = NULL;
     if (for_scanout) {
-        uint32_t max_width, max_height;
         if (!svga3dsurface_is_screen_target_format(format)) {
             DRM_ERROR("Invalid Screen Target surface format.");
             return -EINVAL;
         }
-        max_width = min(dev_priv->texture_max_width,
-                dev_priv->stdu_max_width);
-        max_height = min(dev_priv->texture_max_height,
-                 dev_priv->stdu_max_height);
-        if (size.width > max_width || size.height > max_height) {
+        if (size.width > dev_priv->texture_max_width ||
+            size.height > dev_priv->texture_max_height) {
             DRM_ERROR("%ux%u\n, exceeds max surface size %ux%u",
                   size.width, size.height,
-                  max_width, max_height);
+                  dev_priv->texture_max_width,
+                  dev_priv->texture_max_height);
             return -EINVAL;
         }
     } else {
@@ -1495,8 +1490,17 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
     if (srf->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
         srf->res.backup_size += sizeof(SVGA3dDXSOState);
+    /*
+     * Don't set SVGA3D_SURFACE_SCREENTARGET flag for a scanout surface with
+     * size greater than STDU max width/height. This is really a workaround
+     * to support creation of big framebuffer requested by some user-space
+     * for whole topology. That big framebuffer won't really be used for
+     * binding with screen target as during prepare_fb a separate surface is
+     * created so it's safe to ignore SVGA3D_SURFACE_SCREENTARGET flag.
+     */
     if (dev_priv->active_display_unit == vmw_du_screen_target &&
-        for_scanout)
+        for_scanout && size.width <= dev_priv->stdu_max_width &&
+        size.height <= dev_priv->stdu_max_height)
         srf->flags |= SVGA3D_SURFACE_SCREENTARGET;
     /*
...
@@ -675,7 +675,7 @@ static inline bool drm_core_check_feature(struct drm_device *dev, int feature)
 static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev)
 {
     return drm_core_check_feature(dev, DRIVER_ATOMIC) ||
-        dev->mode_config.funcs->atomic_commit != NULL;
+        (dev->mode_config.funcs && dev->mode_config.funcs->atomic_commit != NULL);
 }
...
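For context on the drm_drv_uses_atomic_modeset() hunk above: a driver that never initializes modesetting leaves mode_config.funcs unset, so the old expression dereferenced a NULL pointer when debugfs probed for atomic support. Below is a minimal standalone sketch of the guarded check; the struct layout and the DRIVER_ATOMIC value are simplified stand-ins, not the kernel's actual definitions.

/* Hypothetical, simplified sketch -- not the kernel's real structures. */
#include <stdbool.h>
#include <stddef.h>

#define DRIVER_ATOMIC 0x1    /* arbitrary feature bit for this sketch */

struct drm_mode_config_funcs {
    int (*atomic_commit)(void);
};

struct drm_device {
    unsigned int driver_features;    /* feature bits such as DRIVER_ATOMIC */
    struct {
        /* stays NULL for drivers that never set up modesetting */
        const struct drm_mode_config_funcs *funcs;
    } mode_config;
};

static bool drm_drv_uses_atomic_modeset(struct drm_device *dev)
{
    /* The extra funcs check mirrors the fix above: without it, a
     * render-only driver with funcs == NULL would be dereferenced. */
    return (dev->driver_features & DRIVER_ATOMIC) ||
           (dev->mode_config.funcs &&
            dev->mode_config.funcs->atomic_commit != NULL);
}

int main(void)
{
    struct drm_device render_only = { 0 };    /* no atomic flag, no funcs */
    /* Returns 0: no atomic modeset reported, and no NULL dereference. */
    return drm_drv_uses_atomic_modeset(&render_only) ? 1 : 0;
}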