Commit 44e69495 authored by Maarten Lankhorst, committed by Rodrigo Vivi

drm/xe/display: Implement display support

For display, the intent is to share the display code with the i915
driver so that we get maximum reuse there.

We do this by recompiling i915/display code twice.
Now that i915 has been adapted to support the Xe build, we can add
the xe/display support.
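In short: the xe Makefile compiles the shared i915 display sources a
second time, pointing the include path at thin compat headers and
renaming the two core i915 types on the compiler command line, e.g.
(excerpted from the Makefile changes below):

  subdir-ccflags-$(CONFIG_DRM_XE_DISPLAY) += \
          -I$(srctree)/$(src)/compat-i915-headers \
          -Ddrm_i915_gem_object=xe_bo \
          -Ddrm_i915_private=xe_device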

This initial work is a collaboration of many people and unfortunately
this squashed patch won't fully honor the proper credits.
But let's try to add a few from the squashed patches:
Co-developed-by: Matthew Brost <matthew.brost@intel.com>
Co-developed-by: Jani Nikula <jani.nikula@intel.com>
Co-developed-by: Lucas De Marchi <lucas.demarchi@intel.com>
Co-developed-by: Matt Roper <matthew.d.roper@intel.com>
Co-developed-by: Mauro Carvalho Chehab <mchehab@kernel.org>
Co-developed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Co-developed-by: Dave Airlie <airlied@redhat.com>
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
parent a839e365
@@ -6,6 +6,7 @@ CONFIG_DRM=y
CONFIG_DRM_FBDEV_EMULATION=y
CONFIG_DRM_KMS_HELPER=y
CONFIG_DRM_XE=y
CONFIG_DRM_XE_DISPLAY=n
CONFIG_EXPERT=y
CONFIG_FB=y
CONFIG_DRM_XE_KUNIT_TEST=y
@@ -12,8 +12,20 @@ config DRM_XE
select DRM_KMS_HELPER
select DRM_PANEL
select DRM_SUBALLOC_HELPER
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HDCP_HELPER
select DRM_DISPLAY_HDMI_HELPER
select DRM_DISPLAY_HELPER
select DRM_MIPI_DSI
select RELAY
select IRQ_WORK
# i915 depends on ACPI_VIDEO when ACPI is enabled
# but for select to work, need to select ACPI_VIDEO's dependencies, ick
select BACKLIGHT_CLASS_DEVICE if ACPI
select INPUT if ACPI
select ACPI_VIDEO if X86 && ACPI
select ACPI_BUTTON if ACPI
select ACPI_WMI if ACPI
select SYNC_FILE
select IOSF_MBI
select CRC32
@@ -33,6 +45,16 @@ config DRM_XE
If "M" is selected, the module will be called xe.
config DRM_XE_DISPLAY
bool "Enable display support"
depends on DRM_XE && EXPERT && DRM_XE=m
select FB_IOMEM_HELPERS
select I2C
select I2C_ALGOBIT
default y
help
Disable this option only if you want to compile out display support.
config DRM_XE_FORCE_PROBE
string "Force probe xe for selected Intel hardware IDs"
depends on DRM_XE
@@ -24,9 +24,6 @@ subdir-ccflags-y += $(call cc-disable-warning, initializer-overrides)
subdir-ccflags-y += $(call cc-disable-warning, frame-address)
subdir-ccflags-$(CONFIG_DRM_XE_WERROR) += -Werror
-# Fine grained warnings disable
-CFLAGS_xe_pci.o = $(call cc-disable-warning, override-init)
subdir-ccflags-y += -I$(obj) -I$(srctree)/$(src)
# generated sources
@@ -126,13 +123,147 @@ xe-y += xe_bb.o \
# graphics hardware monitoring (HWMON) support
xe-$(CONFIG_HWMON) += xe_hwmon.o
# i915 Display compat #defines and #includes
subdir-ccflags-$(CONFIG_DRM_XE_DISPLAY) += \
-I$(srctree)/$(src)/display/ext \
-I$(srctree)/$(src)/compat-i915-headers \
-I$(srctree)/drivers/gpu/drm/xe/display/ \
-I$(srctree)/drivers/gpu/drm/i915/display/ \
-Ddrm_i915_gem_object=xe_bo \
-Ddrm_i915_private=xe_device
CFLAGS_i915-display/intel_fbdev.o = $(call cc-disable-warning, override-init)
CFLAGS_i915-display/intel_display_device.o = $(call cc-disable-warning, override-init)
# Rule to build SOC code shared with i915
$(obj)/i915-soc/%.o: $(srctree)/drivers/gpu/drm/i915/soc/%.c FORCE
$(call cmd,force_checksrc)
$(call if_changed_rule,cc_o_c)
# Rule to build display code shared with i915
$(obj)/i915-display/%.o: $(srctree)/drivers/gpu/drm/i915/display/%.c FORCE
$(call cmd,force_checksrc)
$(call if_changed_rule,cc_o_c)
# Display code specific to xe
xe-$(CONFIG_DRM_XE_DISPLAY) += \
xe_display.o \
display/xe_fb_pin.o \
display/xe_hdcp_gsc.o \
display/xe_plane_initial.o \
display/xe_display_rps.o \
display/intel_fbdev_fb.o \
display/intel_fb_bo.o \
display/ext/i915_irq.o \
display/ext/i915_utils.o
# SOC code shared with i915
xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-soc/intel_dram.o \
i915-soc/intel_pch.o
# Display code shared with i915
xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/icl_dsi.o \
i915-display/intel_atomic.o \
i915-display/intel_atomic_plane.o \
i915-display/intel_audio.o \
i915-display/intel_backlight.o \
i915-display/intel_bios.o \
i915-display/intel_bw.o \
i915-display/intel_cdclk.o \
i915-display/intel_color.o \
i915-display/intel_combo_phy.o \
i915-display/intel_connector.o \
i915-display/intel_crtc.o \
i915-display/intel_crtc_state_dump.o \
i915-display/intel_cursor.o \
i915-display/intel_cx0_phy.o \
i915-display/intel_ddi.o \
i915-display/intel_ddi_buf_trans.o \
i915-display/intel_display.o \
i915-display/intel_display_debugfs.o \
i915-display/intel_display_debugfs_params.o \
i915-display/intel_display_device.o \
i915-display/intel_display_driver.o \
i915-display/intel_display_irq.o \
i915-display/intel_display_params.o \
i915-display/intel_display_power.o \
i915-display/intel_display_power_map.o \
i915-display/intel_display_power_well.o \
i915-display/intel_display_trace.o \
i915-display/intel_display_wa.o \
i915-display/intel_dkl_phy.o \
i915-display/intel_dmc.o \
i915-display/intel_dp.o \
i915-display/intel_dp_aux.o \
i915-display/intel_dp_aux_backlight.o \
i915-display/intel_dp_hdcp.o \
i915-display/intel_dp_link_training.o \
i915-display/intel_dp_mst.o \
i915-display/intel_dpll.o \
i915-display/intel_dpll_mgr.o \
i915-display/intel_dpt_common.o \
i915-display/intel_drrs.o \
i915-display/intel_dsb.o \
i915-display/intel_dsi.o \
i915-display/intel_dsi_dcs_backlight.o \
i915-display/intel_dsi_vbt.o \
i915-display/intel_fb.o \
i915-display/intel_fbc.o \
i915-display/intel_fdi.o \
i915-display/intel_fifo_underrun.o \
i915-display/intel_frontbuffer.o \
i915-display/intel_global_state.o \
i915-display/intel_gmbus.o \
i915-display/intel_hdcp.o \
i915-display/intel_hdmi.o \
i915-display/intel_hotplug.o \
i915-display/intel_hotplug_irq.o \
i915-display/intel_hti.o \
i915-display/intel_link_bw.o \
i915-display/intel_lspcon.o \
i915-display/intel_modeset_lock.o \
i915-display/intel_modeset_setup.o \
i915-display/intel_modeset_verify.o \
i915-display/intel_panel.o \
i915-display/intel_pipe_crc.o \
i915-display/intel_pmdemand.o \
i915-display/intel_pps.o \
i915-display/intel_psr.o \
i915-display/intel_qp_tables.o \
i915-display/intel_quirks.o \
i915-display/intel_snps_phy.o \
i915-display/intel_tc.o \
i915-display/intel_vblank.o \
i915-display/intel_vdsc.o \
i915-display/intel_vga.o \
i915-display/intel_vrr.o \
i915-display/intel_wm.o \
i915-display/skl_scaler.o \
i915-display/skl_universal_plane.o \
i915-display/skl_watermark.o
xe-$(CONFIG_PERF_EVENTS) += xe_pmu.o
ifeq ($(CONFIG_ACPI),y)
xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_acpi.o \
i915-display/intel_opregion.o
endif
ifeq ($(CONFIG_DRM_FBDEV_EMULATION),y)
xe-$(CONFIG_DRM_XE_DISPLAY) += i915-display/intel_fbdev.o
endif
obj-$(CONFIG_DRM_XE) += xe.o
obj-$(CONFIG_DRM_XE_KUNIT_TEST) += tests/
# header test
hdrtest_find_args := -not -path xe_rtp_helpers.h
ifneq ($(CONFIG_DRM_XE_DISPLAY),y)
hdrtest_find_args += -not -path display/\* -not -path compat-i915-headers/\* -not -path xe_display.h
endif
always-$(CONFIG_DRM_XE_WERROR) += \
$(patsubst %.h,%.hdrtest, $(shell cd $(srctree)/$(src) && find * -name '*.h' $(hdrtest_find_args)))
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2023 Intel Corporation
*/
#ifndef _I915_GEM_MMAN_H_
#define _I915_GEM_MMAN_H_
#include "xe_bo_types.h"
#include <drm/drm_prime.h>
static inline int i915_gem_fb_mmap(struct xe_bo *bo, struct vm_area_struct *vma)
{
return drm_gem_prime_mmap(&bo->ttm.base, vma);
}
#endif
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2022 Intel Corporation
*/
#ifndef _I915_GEM_OBJECT_H_
#define _I915_GEM_OBJECT_H_
#include <linux/types.h>
#include "xe_bo.h"
#define i915_gem_object_is_shmem(obj) ((obj)->flags & XE_BO_CREATE_SYSTEM_BIT)
static inline dma_addr_t i915_gem_object_get_dma_address(const struct xe_bo *bo, pgoff_t n)
{
/* Should never be called */
WARN_ON(1);
return n;
}
static inline bool i915_gem_object_is_tiled(const struct xe_bo *bo)
{
/* legacy tiling is unused */
return false;
}
static inline bool i915_gem_object_is_userptr(const struct xe_bo *bo)
{
/* xe does not expose userptr objects to the display code */
return false;
}
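/*
 * Read a single u64 from a bo for the display code: lock the bo, kmap the
 * page containing @ofs and read through either the iomem or the system
 * memory mapping. Only 8-byte reads are supported, hence the warn below.
 */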
static inline int i915_gem_object_read_from_page(struct xe_bo *bo,
u32 ofs, u64 *ptr, u32 size)
{
struct ttm_bo_kmap_obj map;
void *virtual;
bool is_iomem;
int ret;
XE_WARN_ON(size != 8);
ret = xe_bo_lock(bo, true);
if (ret)
return ret;
ret = ttm_bo_kmap(&bo->ttm, ofs >> PAGE_SHIFT, 1, &map);
if (ret)
goto out_unlock;
ofs &= ~PAGE_MASK;
virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
if (is_iomem)
*ptr = readq((void __iomem *)(virtual + ofs));
else
*ptr = *(u64 *)(virtual + ofs);
ttm_bo_kunmap(&map);
out_unlock:
xe_bo_unlock(bo);
return ret;
}
#endif
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2023 Intel Corporation
*/
#ifndef __INTEL_RPS_H__
#define __INTEL_RPS_H__
#define gen5_rps_irq_handler(x) ({})
#endif /* __INTEL_RPS_H__ */
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2023 Intel Corporation
*/
#ifndef __I915_CONFIG_H__
#define __I915_CONFIG_H__
#include <linux/sched.h>
struct drm_i915_private;
static inline unsigned long
i915_fence_timeout(const struct drm_i915_private *i915)
{
return MAX_SCHEDULE_TIMEOUT;
}
#endif /* __I915_CONFIG_H__ */
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2023 Intel Corporation
*/
#ifndef __I915_DEBUGFS_H__
#define __I915_DEBUGFS_H__
struct drm_i915_gem_object;
struct seq_file;
static inline void i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) {}
#endif /* __I915_DEBUGFS_H__ */
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2023 Intel Corporation
*/
#ifndef _XE_I915_DRV_H_
#define _XE_I915_DRV_H_
/*
* "Adaptation header" to allow i915 display to also build for xe driver.
* TODO: refactor i915 and xe so this can cease to exist
*/
#include <drm/drm_drv.h>
#include "gem/i915_gem_object.h"
#include "soc/intel_pch.h"
#include "xe_device.h"
#include "xe_bo.h"
#include "xe_pm.h"
#include "xe_step.h"
#include "i915_gpu_error.h"
#include "i915_reg_defs.h"
#include "i915_utils.h"
#include "intel_step.h"
#include "intel_uc_fw.h"
#include "intel_uncore.h"
#include "intel_runtime_pm.h"
#include <linux/pm_runtime.h>
static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
return container_of(dev, struct drm_i915_private, drm);
}
static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
{
return dev_get_drvdata(kdev);
}
#define INTEL_JASPERLAKE 0
#define INTEL_ELKHARTLAKE 0
#define IS_PLATFORM(xe, x) ((xe)->info.platform == (x))
#define INTEL_INFO(dev_priv) (&((dev_priv)->info))
#define INTEL_DEVID(dev_priv) ((dev_priv)->info.devid)
#define IS_I830(dev_priv) (dev_priv && 0)
#define IS_I845G(dev_priv) (dev_priv && 0)
#define IS_I85X(dev_priv) (dev_priv && 0)
#define IS_I865G(dev_priv) (dev_priv && 0)
#define IS_I915G(dev_priv) (dev_priv && 0)
#define IS_I915GM(dev_priv) (dev_priv && 0)
#define IS_I945G(dev_priv) (dev_priv && 0)
#define IS_I945GM(dev_priv) (dev_priv && 0)
#define IS_I965G(dev_priv) (dev_priv && 0)
#define IS_I965GM(dev_priv) (dev_priv && 0)
#define IS_G45(dev_priv) (dev_priv && 0)
#define IS_GM45(dev_priv) (dev_priv && 0)
#define IS_G4X(dev_priv) (dev_priv && 0)
#define IS_PINEVIEW(dev_priv) (dev_priv && 0)
#define IS_G33(dev_priv) (dev_priv && 0)
#define IS_IRONLAKE(dev_priv) (dev_priv && 0)
#define IS_IRONLAKE_M(dev_priv) (dev_priv && 0)
#define IS_SANDYBRIDGE(dev_priv) (dev_priv && 0)
#define IS_IVYBRIDGE(dev_priv) (dev_priv && 0)
#define IS_IVB_GT1(dev_priv) (dev_priv && 0)
#define IS_VALLEYVIEW(dev_priv) (dev_priv && 0)
#define IS_CHERRYVIEW(dev_priv) (dev_priv && 0)
#define IS_HASWELL(dev_priv) (dev_priv && 0)
#define IS_BROADWELL(dev_priv) (dev_priv && 0)
#define IS_SKYLAKE(dev_priv) (dev_priv && 0)
#define IS_BROXTON(dev_priv) (dev_priv && 0)
#define IS_KABYLAKE(dev_priv) (dev_priv && 0)
#define IS_GEMINILAKE(dev_priv) (dev_priv && 0)
#define IS_COFFEELAKE(dev_priv) (dev_priv && 0)
#define IS_COMETLAKE(dev_priv) (dev_priv && 0)
#define IS_ICELAKE(dev_priv) (dev_priv && 0)
#define IS_JASPERLAKE(dev_priv) (dev_priv && 0)
#define IS_ELKHARTLAKE(dev_priv) (dev_priv && 0)
#define IS_TIGERLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_TIGERLAKE)
#define IS_ROCKETLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_ROCKETLAKE)
#define IS_DG1(dev_priv) IS_PLATFORM(dev_priv, XE_DG1)
#define IS_ALDERLAKE_S(dev_priv) IS_PLATFORM(dev_priv, XE_ALDERLAKE_S)
#define IS_ALDERLAKE_P(dev_priv) IS_PLATFORM(dev_priv, XE_ALDERLAKE_P)
#define IS_XEHPSDV(dev_priv) (dev_priv && 0)
#define IS_DG2(dev_priv) IS_PLATFORM(dev_priv, XE_DG2)
#define IS_PONTEVECCHIO(dev_priv) IS_PLATFORM(dev_priv, XE_PVC)
#define IS_METEORLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_METEORLAKE)
#define IS_LUNARLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_LUNARLAKE)
#define IS_HASWELL_ULT(dev_priv) (dev_priv && 0)
#define IS_BROADWELL_ULT(dev_priv) (dev_priv && 0)
#define IS_BROADWELL_ULX(dev_priv) (dev_priv && 0)
#define IP_VER(ver, rel) ((ver) << 8 | (rel))
#define INTEL_DISPLAY_ENABLED(xe) (HAS_DISPLAY((xe)) && !intel_opregion_headless_sku((xe)))
#define IS_GRAPHICS_VER(xe, first, last) \
((xe)->info.graphics_verx100 >= (first) * 100 && \
(xe)->info.graphics_verx100 <= ((last) * 100 + 99))
#define IS_MOBILE(xe) (xe && 0)
#define HAS_LLC(xe) (!IS_DGFX((xe)))
#define HAS_GMD_ID(xe) (GRAPHICS_VERx100(xe) >= 1270)
/* Workarounds not handled yet */
#define IS_DISPLAY_STEP(xe, first, last) ({ u8 __step = (xe)->info.step.display; (first) <= __step && __step <= (last); })
#define IS_GRAPHICS_STEP(xe, first, last) ({ u8 __step = (xe)->info.step.graphics; (first) <= __step && __step <= (last); })
#define IS_LP(xe) (0)
#define IS_GEN9_LP(xe) (0)
#define IS_GEN9_BC(xe) (0)
#define IS_TIGERLAKE_UY(xe) (xe && 0)
#define IS_COMETLAKE_ULX(xe) (xe && 0)
#define IS_COFFEELAKE_ULX(xe) (xe && 0)
#define IS_KABYLAKE_ULX(xe) (xe && 0)
#define IS_SKYLAKE_ULX(xe) (xe && 0)
#define IS_HASWELL_ULX(xe) (xe && 0)
#define IS_COMETLAKE_ULT(xe) (xe && 0)
#define IS_COFFEELAKE_ULT(xe) (xe && 0)
#define IS_KABYLAKE_ULT(xe) (xe && 0)
#define IS_SKYLAKE_ULT(xe) (xe && 0)
#define IS_DG1_GRAPHICS_STEP(xe, first, last) (IS_DG1(xe) && IS_GRAPHICS_STEP(xe, first, last))
#define IS_DG2_GRAPHICS_STEP(xe, variant, first, last) \
((xe)->info.subplatform == XE_SUBPLATFORM_DG2_ ## variant && \
IS_GRAPHICS_STEP(xe, first, last))
#define IS_XEHPSDV_GRAPHICS_STEP(xe, first, last) (IS_XEHPSDV(xe) && IS_GRAPHICS_STEP(xe, first, last))
/* XXX: No basedie stepping support yet */
#define IS_PVC_BD_STEP(xe, first, last) (!WARN_ON(1) && IS_PONTEVECCHIO(xe))
#define IS_TIGERLAKE_DISPLAY_STEP(xe, first, last) (IS_TIGERLAKE(xe) && IS_DISPLAY_STEP(xe, first, last))
#define IS_ROCKETLAKE_DISPLAY_STEP(xe, first, last) (IS_ROCKETLAKE(xe) && IS_DISPLAY_STEP(xe, first, last))
#define IS_DG1_DISPLAY_STEP(xe, first, last) (IS_DG1(xe) && IS_DISPLAY_STEP(xe, first, last))
#define IS_DG2_DISPLAY_STEP(xe, first, last) (IS_DG2(xe) && IS_DISPLAY_STEP(xe, first, last))
#define IS_ADLP_DISPLAY_STEP(xe, first, last) (IS_ALDERLAKE_P(xe) && IS_DISPLAY_STEP(xe, first, last))
#define IS_ADLS_DISPLAY_STEP(xe, first, last) (IS_ALDERLAKE_S(xe) && IS_DISPLAY_STEP(xe, first, last))
#define IS_JSL_EHL_DISPLAY_STEP(xe, first, last) (IS_JSL_EHL(xe) && IS_DISPLAY_STEP(xe, first, last))
#define IS_MTL_DISPLAY_STEP(xe, first, last) (IS_METEORLAKE(xe) && IS_DISPLAY_STEP(xe, first, last))
/* FIXME: Add subplatform here */
#define IS_MTL_GRAPHICS_STEP(xe, sub, first, last) (IS_METEORLAKE(xe) && IS_DISPLAY_STEP(xe, first, last))
#define IS_DG2_G10(xe) ((xe)->info.subplatform == XE_SUBPLATFORM_DG2_G10)
#define IS_DG2_G11(xe) ((xe)->info.subplatform == XE_SUBPLATFORM_DG2_G11)
#define IS_DG2_G12(xe) ((xe)->info.subplatform == XE_SUBPLATFORM_DG2_G12)
#define IS_RAPTORLAKE_U(xe) ((xe)->info.subplatform == XE_SUBPLATFORM_ALDERLAKE_P_RPLU)
#define IS_ICL_WITH_PORT_F(xe) (xe && 0)
#define HAS_FLAT_CCS(xe) (xe_device_has_flat_ccs(xe))
#define to_intel_bo(x) gem_to_xe_bo((x))
#define mkwrite_device_info(xe) (INTEL_INFO(xe))
#define HAS_128_BYTE_Y_TILING(xe) (xe || 1)
#define intel_has_gpu_reset(a) (a && 0)
#include "intel_wakeref.h"
static inline bool intel_runtime_pm_get(struct xe_runtime_pm *pm)
{
struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);
if (xe_pm_runtime_get(xe) < 0) {
xe_pm_runtime_put(xe);
return false;
}
return true;
}
static inline bool intel_runtime_pm_get_if_in_use(struct xe_runtime_pm *pm)
{
struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);
return xe_pm_runtime_get_if_active(xe);
}
static inline void intel_runtime_pm_put_unchecked(struct xe_runtime_pm *pm)
{
struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);
xe_pm_runtime_put(xe);
}
static inline void intel_runtime_pm_put(struct xe_runtime_pm *pm, bool wakeref)
{
if (wakeref)
intel_runtime_pm_put_unchecked(pm);
}
#define intel_runtime_pm_get_raw intel_runtime_pm_get
#define intel_runtime_pm_put_raw intel_runtime_pm_put
#define assert_rpm_wakelock_held(x) do { } while (0)
#define assert_rpm_raw_wakeref_held(x) do { } while (0)
#define intel_uncore_forcewake_get(x, y) do { } while (0)
#define intel_uncore_forcewake_put(x, y) do { } while (0)
#define intel_uncore_arm_unclaimed_mmio_detection(x) do { } while (0)
#define I915_PRIORITY_DISPLAY 0
struct i915_sched_attr {
int priority;
};
#define i915_gem_fence_wait_priority(fence, attr) do { (void) attr; } while (0)
#define with_intel_runtime_pm(rpm, wf) \
for ((wf) = intel_runtime_pm_get(rpm); (wf); \
intel_runtime_pm_put((rpm), (wf)), (wf) = 0)
#define pdev_to_i915 pdev_to_xe_device
#define RUNTIME_INFO(xe) (&(xe)->info.i915_runtime)
#define FORCEWAKE_ALL XE_FORCEWAKE_ALL
#define HPD_STORM_DEFAULT_THRESHOLD 50
#ifdef CONFIG_ARM64
/*
* arm64 indirectly includes linux/rtc.h,
* which defines a irq_lock, so include it
* here before #define-ing it
*/
#include <linux/rtc.h>
#endif
#define irq_lock irq.lock
#endif
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2023 Intel Corporation
*/
#include "../../i915/i915_fixed.h"
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2023 Intel Corporation
*/
#ifndef _I915_GPU_ERROR_H_
#define _I915_GPU_ERROR_H_
struct drm_i915_error_state_buf;
__printf(2, 3)
static inline void
i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
{
}
#endif
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2023 Intel Corporation
*/
#include "../../i915/i915_irq.h"
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2023 Intel Corporation
*/
#include "../../i915/i915_reg.h"
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2023 Intel Corporation
*/
#include "../../i915/i915_reg_defs.h"
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2023 Intel Corporation
*/
#define trace_i915_reg_rw(a...) do { } while (0)
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2023 Intel Corporation
*/
#include "../../i915/i915_utils.h"
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2023 Intel Corporation
*/
#ifndef _I915_VGPU_H_
#define _I915_VGPU_H_
#include <linux/types.h>
struct drm_i915_private;
struct i915_ggtt;
static inline void intel_vgpu_detect(struct drm_i915_private *i915)
{
}
static inline bool intel_vgpu_active(struct drm_i915_private *i915)
{
return false;
}
static inline void intel_vgpu_register(struct drm_i915_private *i915)
{
}
static inline bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *i915)
{
return false;
}
static inline bool intel_vgpu_has_hwsp_emulation(struct drm_i915_private *i915)
{
return false;
}
static inline bool intel_vgpu_has_huge_gtt(struct drm_i915_private *i915)
{
return false;
}
static inline int intel_vgt_balloon(struct i915_ggtt *ggtt)
{
return 0;
}
static inline void intel_vgt_deballoon(struct i915_ggtt *ggtt)
{
}
#endif /* _I915_VGPU_H_ */
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2023 Intel Corporation
*/
#ifndef I915_VMA_H
#define I915_VMA_H
#include <uapi/drm/i915_drm.h>
#include <drm/drm_mm.h>
/* We don't want these from i915_drm.h in case of Xe */
#undef I915_TILING_X
#undef I915_TILING_Y
#define I915_TILING_X 0
#define I915_TILING_Y 0
struct xe_bo;
struct i915_vma {
struct xe_bo *bo, *dpt;
struct drm_mm_node node;
};
static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
{
return vma->node.start;
}
#endif
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2023 Intel Corporation
*/
#include <linux/types.h>
#include <linux/build_bug.h>
/* XXX: Figure out how to handle this vma mapping in xe */
struct intel_remapped_plane_info {
/* in gtt pages */
u32 offset:31;
u32 linear:1;
union {
/* in gtt pages for !linear */
struct {
u16 width;
u16 height;
u16 src_stride;
u16 dst_stride;
};
/* in gtt pages for linear */
u32 size;
};
} __packed;
struct intel_remapped_info {
struct intel_remapped_plane_info plane[4];
/* in gtt pages */
u32 plane_alignment;
} __packed;
struct intel_rotation_info {
struct intel_remapped_plane_info plane[2];
} __packed;
enum i915_gtt_view_type {
I915_GTT_VIEW_NORMAL = 0,
I915_GTT_VIEW_ROTATED = sizeof(struct intel_rotation_info),
I915_GTT_VIEW_REMAPPED = sizeof(struct intel_remapped_info),
};
static inline void assert_i915_gem_gtt_types(void)
{
BUILD_BUG_ON(sizeof(struct intel_rotation_info) != 2 * sizeof(u32) + 8 * sizeof(u16));
BUILD_BUG_ON(sizeof(struct intel_remapped_info) != 5 * sizeof(u32) + 16 * sizeof(u16));
/* Check that rotation/remapped shares offsets for simplicity */
BUILD_BUG_ON(offsetof(struct intel_remapped_info, plane[0]) !=
offsetof(struct intel_rotation_info, plane[0]));
BUILD_BUG_ON(offsetofend(struct intel_remapped_info, plane[1]) !=
offsetofend(struct intel_rotation_info, plane[1]));
/* As we encode the size of each branch inside the union into its type,
* we have to be careful that each branch has a unique size.
*/
switch ((enum i915_gtt_view_type)0) {
case I915_GTT_VIEW_NORMAL:
case I915_GTT_VIEW_ROTATED:
case I915_GTT_VIEW_REMAPPED:
/* gcc complains if these are identical cases */
break;
}
}
struct i915_gtt_view {
enum i915_gtt_view_type type;
union {
/* Members need to contain no holes/padding */
struct intel_rotation_info rotated;
struct intel_remapped_info remapped;
};
};
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2023 Intel Corporation
*/
#include "../../i915/intel_clock_gating.h"
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2023 Intel Corporation
*/
#include "../../i915/intel_mchbar_regs.h"
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2023 Intel Corporation
*/
#include "../../i915/intel_pci_config.h"
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2023 Intel Corporation
*/
#ifndef __INTEL_PCODE_H__
#define __INTEL_PCODE_H__
#include "intel_uncore.h"
#include "xe_pcode.h"
static inline int
snb_pcode_write_timeout(struct intel_uncore *uncore, u32 mbox, u32 val,
int fast_timeout_us, int slow_timeout_ms)
{
return xe_pcode_write_timeout(__compat_uncore_to_gt(uncore), mbox, val,
slow_timeout_ms ?: 1);
}
static inline int
snb_pcode_write(struct intel_uncore *uncore, u32 mbox, u32 val)
{
return xe_pcode_write(__compat_uncore_to_gt(uncore), mbox, val);
}
static inline int
snb_pcode_read(struct intel_uncore *uncore, u32 mbox, u32 *val, u32 *val1)
{
return xe_pcode_read(__compat_uncore_to_gt(uncore), mbox, val, val1);
}
static inline int
skl_pcode_request(struct intel_uncore *uncore, u32 mbox,
u32 request, u32 reply_mask, u32 reply,
int timeout_base_ms)
{
return xe_pcode_request(__compat_uncore_to_gt(uncore), mbox, request, reply_mask, reply,
timeout_base_ms);
}
#endif /* __INTEL_PCODE_H__ */
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2023 Intel Corporation
*/
#include "intel_wakeref.h"
enum i915_drm_suspend_mode {
I915_DRM_SUSPEND_IDLE,
I915_DRM_SUSPEND_MEM,
I915_DRM_SUSPEND_HIBERNATE,
};
#define intel_runtime_pm xe_runtime_pm
static inline void disable_rpm_wakeref_asserts(void *rpm)
{
}
static inline void enable_rpm_wakeref_asserts(void *rpm)
{
}
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2023 Intel Corporation
*/
#ifndef __INTEL_STEP_H__
#define __INTEL_STEP_H__
#include "xe_device_types.h"
#include "xe_step.h"
#define intel_display_step_name xe_display_step_name
static inline
const char *xe_display_step_name(struct xe_device *xe)
{
return xe_step_name(xe->info.step.display);
}
#endif /* __INTEL_STEP_H__ */
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2023 Intel Corporation
*/
#ifndef _INTEL_UC_FW_H_
#define _INTEL_UC_FW_H_
#define INTEL_UC_FIRMWARE_URL "https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git"
#endif
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2023 Intel Corporation
*/
#ifndef __INTEL_UNCORE_H__
#define __INTEL_UNCORE_H__
#include "xe_device.h"
#include "xe_device_types.h"
#include "xe_mmio.h"
static inline struct xe_gt *__compat_uncore_to_gt(struct intel_uncore *uncore)
{
struct xe_device *xe = container_of(uncore, struct xe_device, uncore);
return xe_root_mmio_gt(xe);
}
static inline u32 intel_uncore_read(struct intel_uncore *uncore,
i915_reg_t i915_reg)
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
return xe_mmio_read32(__compat_uncore_to_gt(uncore), reg);
}
static inline u32 intel_uncore_read8(struct intel_uncore *uncore,
i915_reg_t i915_reg)
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
return xe_mmio_read8(__compat_uncore_to_gt(uncore), reg);
}
static inline u32 intel_uncore_read16(struct intel_uncore *uncore,
i915_reg_t i915_reg)
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
return xe_mmio_read16(__compat_uncore_to_gt(uncore), reg);
}
static inline u64
intel_uncore_read64_2x32(struct intel_uncore *uncore,
i915_reg_t i915_lower_reg, i915_reg_t i915_upper_reg)
{
struct xe_reg lower_reg = XE_REG(i915_mmio_reg_offset(i915_lower_reg));
struct xe_reg upper_reg = XE_REG(i915_mmio_reg_offset(i915_upper_reg));
u32 upper, lower, old_upper;
int loop = 0;
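/* Read upper, lower, then upper again, retrying until the upper half is
 * stable, so a carry between the two 32-bit reads cannot tear the value.
 */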
upper = xe_mmio_read32(__compat_uncore_to_gt(uncore), upper_reg);
do {
old_upper = upper;
lower = xe_mmio_read32(__compat_uncore_to_gt(uncore), lower_reg);
upper = xe_mmio_read32(__compat_uncore_to_gt(uncore), upper_reg);
} while (upper != old_upper && loop++ < 2);
return (u64)upper << 32 | lower;
}
static inline void intel_uncore_posting_read(struct intel_uncore *uncore,
i915_reg_t i915_reg)
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
xe_mmio_read32(__compat_uncore_to_gt(uncore), reg);
}
static inline void intel_uncore_write(struct intel_uncore *uncore,
i915_reg_t i915_reg, u32 val)
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
xe_mmio_write32(__compat_uncore_to_gt(uncore), reg, val);
}
static inline u32 intel_uncore_rmw(struct intel_uncore *uncore,
i915_reg_t i915_reg, u32 clear, u32 set)
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
return xe_mmio_rmw32(__compat_uncore_to_gt(uncore), reg, clear, set);
}
static inline int intel_wait_for_register(struct intel_uncore *uncore,
i915_reg_t i915_reg, u32 mask,
u32 value, unsigned int timeout)
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
return xe_mmio_wait32(__compat_uncore_to_gt(uncore), reg, mask, value,
timeout * USEC_PER_MSEC, NULL, false);
}
static inline int intel_wait_for_register_fw(struct intel_uncore *uncore,
i915_reg_t i915_reg, u32 mask,
u32 value, unsigned int timeout)
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
return xe_mmio_wait32(__compat_uncore_to_gt(uncore), reg, mask, value,
timeout * USEC_PER_MSEC, NULL, false);
}
static inline int
__intel_wait_for_register(struct intel_uncore *uncore, i915_reg_t i915_reg,
u32 mask, u32 value, unsigned int fast_timeout_us,
unsigned int slow_timeout_ms, u32 *out_value)
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
return xe_mmio_wait32(__compat_uncore_to_gt(uncore), reg, mask, value,
fast_timeout_us + 1000 * slow_timeout_ms,
out_value, false);
}
static inline u32 intel_uncore_read_fw(struct intel_uncore *uncore,
i915_reg_t i915_reg)
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
return xe_mmio_read32(__compat_uncore_to_gt(uncore), reg);
}
static inline void intel_uncore_write_fw(struct intel_uncore *uncore,
i915_reg_t i915_reg, u32 val)
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
xe_mmio_write32(__compat_uncore_to_gt(uncore), reg, val);
}
static inline u32 intel_uncore_read_notrace(struct intel_uncore *uncore,
i915_reg_t i915_reg)
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
return xe_mmio_read32(__compat_uncore_to_gt(uncore), reg);
}
static inline void intel_uncore_write_notrace(struct intel_uncore *uncore,
i915_reg_t i915_reg, u32 val)
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
xe_mmio_write32(__compat_uncore_to_gt(uncore), reg, val);
}
static inline void __iomem *intel_uncore_regs(struct intel_uncore *uncore)
{
struct xe_device *xe = container_of(uncore, struct xe_device, uncore);
return xe_device_get_root_tile(xe)->mmio.regs;
}
/*
* The raw_reg_{read,write} macros are intended as a micro-optimization for
* interrupt handlers so that the pointer indirection on uncore->regs can
* be computed once (and presumably cached in a register) instead of generating
* extra load instructions for each MMIO access.
*
* Given that these macros are only intended for non-GSI interrupt registers
* (and the goal is to avoid extra instructions generated by the compiler),
* these macros do not account for uncore->gsi_offset. Any caller that needs
* to use these macros on a GSI register is responsible for adding the
* appropriate GSI offset to the 'base' parameter.
*/
#define raw_reg_read(base, reg) \
readl(base + i915_mmio_reg_offset(reg))
#define raw_reg_write(base, reg, value) \
writel(value, base + i915_mmio_reg_offset(reg))
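/*
 * Hypothetical usage sketch (GEN11_GU_MISC_IIR stands in for any non-GSI
 * interrupt register; not part of this patch): cache the MMIO base once,
 * then use the raw accessors from the hot interrupt path.
 *
 *	void __iomem *regs = intel_uncore_regs(uncore);
 *	u32 iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
 *	if (iir)
 *		raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);
 */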
#endif /* __INTEL_UNCORE_H__ */
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2023 Intel Corporation
*/
#include <linux/types.h>
typedef bool intel_wakeref_t;
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2023 Intel Corporation
*/
#ifndef __INTEL_PXP_H__
#define __INTEL_PXP_H__
#include <linux/errno.h>
#include <linux/types.h>
struct drm_i915_gem_object;
struct intel_pxp;
static inline int intel_pxp_key_check(struct intel_pxp *pxp,
struct drm_i915_gem_object *obj,
bool assign)
{
return -ENODEV;
}
static inline bool
i915_gem_object_is_protected(const struct drm_i915_gem_object *obj)
{
return false;
}
#endif
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2023 Intel Corporation
*/
#include "../../../i915/soc/intel_dram.h"
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2023 Intel Corporation
*/
#include "../../../i915/soc/intel_gmch.h"
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2023 Intel Corporation
*/
#include "../../../i915/soc/intel_pch.h"
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2013-2021 Intel Corporation
*/
#ifndef _VLV_SIDEBAND_H_
#define _VLV_SIDEBAND_H_
#include <linux/types.h>
#include "vlv_sideband_reg.h"
enum pipe;
struct drm_i915_private;
enum {
VLV_IOSF_SB_BUNIT,
VLV_IOSF_SB_CCK,
VLV_IOSF_SB_CCU,
VLV_IOSF_SB_DPIO,
VLV_IOSF_SB_FLISDSI,
VLV_IOSF_SB_GPIO,
VLV_IOSF_SB_NC,
VLV_IOSF_SB_PUNIT,
};
static inline void vlv_iosf_sb_get(struct drm_i915_private *i915, unsigned long ports)
{
}
static inline u32 vlv_iosf_sb_read(struct drm_i915_private *i915, u8 port, u32 reg)
{
return 0;
}
static inline void vlv_iosf_sb_write(struct drm_i915_private *i915,
u8 port, u32 reg, u32 val)
{
}
static inline void vlv_iosf_sb_put(struct drm_i915_private *i915, unsigned long ports)
{
}
static inline void vlv_bunit_get(struct drm_i915_private *i915)
{
}
static inline u32 vlv_bunit_read(struct drm_i915_private *i915, u32 reg)
{
return 0;
}
static inline void vlv_bunit_write(struct drm_i915_private *i915, u32 reg, u32 val)
{
}
static inline void vlv_bunit_put(struct drm_i915_private *i915)
{
}
static inline void vlv_cck_get(struct drm_i915_private *i915)
{
}
static inline u32 vlv_cck_read(struct drm_i915_private *i915, u32 reg)
{
return 0;
}
static inline void vlv_cck_write(struct drm_i915_private *i915, u32 reg, u32 val)
{
}
static inline void vlv_cck_put(struct drm_i915_private *i915)
{
}
static inline void vlv_ccu_get(struct drm_i915_private *i915)
{
}
static inline u32 vlv_ccu_read(struct drm_i915_private *i915, u32 reg)
{
return 0;
}
static inline void vlv_ccu_write(struct drm_i915_private *i915, u32 reg, u32 val)
{
}
static inline void vlv_ccu_put(struct drm_i915_private *i915)
{
}
static inline void vlv_dpio_get(struct drm_i915_private *i915)
{
}
static inline u32 vlv_dpio_read(struct drm_i915_private *i915, int pipe, int reg)
{
return 0;
}
static inline void vlv_dpio_write(struct drm_i915_private *i915,
int pipe, int reg, u32 val)
{
}
static inline void vlv_dpio_put(struct drm_i915_private *i915)
{
}
static inline void vlv_flisdsi_get(struct drm_i915_private *i915)
{
}
static inline u32 vlv_flisdsi_read(struct drm_i915_private *i915, u32 reg)
{
return 0;
}
static inline void vlv_flisdsi_write(struct drm_i915_private *i915, u32 reg, u32 val)
{
}
static inline void vlv_flisdsi_put(struct drm_i915_private *i915)
{
}
static inline void vlv_nc_get(struct drm_i915_private *i915)
{
}
static inline u32 vlv_nc_read(struct drm_i915_private *i915, u8 addr)
{
return 0;
}
static inline void vlv_nc_put(struct drm_i915_private *i915)
{
}
static inline void vlv_punit_get(struct drm_i915_private *i915)
{
}
static inline u32 vlv_punit_read(struct drm_i915_private *i915, u32 addr)
{
return 0;
}
static inline int vlv_punit_write(struct drm_i915_private *i915, u32 addr, u32 val)
{
return 0;
}
static inline void vlv_punit_put(struct drm_i915_private *i915)
{
}
#endif /* _VLV_SIDEBAND_H_ */
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2023 Intel Corporation
*/
#include "../../i915/vlv_sideband_reg.h"
// SPDX-License-Identifier: MIT
/*
* Copyright © 2023 Intel Corporation
*/
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
#include "intel_uncore.h"
void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
i915_reg_t iir, i915_reg_t ier)
{
intel_uncore_write(uncore, imr, 0xffffffff);
intel_uncore_posting_read(uncore, imr);
intel_uncore_write(uncore, ier, 0);
/* IIR can theoretically queue up two events. Be paranoid. */
intel_uncore_write(uncore, iir, 0xffffffff);
intel_uncore_posting_read(uncore, iir);
intel_uncore_write(uncore, iir, 0xffffffff);
intel_uncore_posting_read(uncore, iir);
}
/*
* We should clear IMR at preinstall/uninstall, and just check at postinstall.
*/
void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
struct xe_device *xe = container_of(uncore, struct xe_device, uncore);
u32 val = intel_uncore_read(uncore, reg);
if (val == 0)
return;
drm_WARN(&xe->drm, 1,
"Interrupt register 0x%x is not zero: 0x%08x\n",
i915_mmio_reg_offset(reg), val);
intel_uncore_write(uncore, reg, 0xffffffff);
intel_uncore_posting_read(uncore, reg);
intel_uncore_write(uncore, reg, 0xffffffff);
intel_uncore_posting_read(uncore, reg);
}
void gen3_irq_init(struct intel_uncore *uncore,
i915_reg_t imr, u32 imr_val,
i915_reg_t ier, u32 ier_val,
i915_reg_t iir)
{
gen3_assert_iir_is_zero(uncore, iir);
intel_uncore_write(uncore, ier, ier_val);
intel_uncore_write(uncore, imr, imr_val);
intel_uncore_posting_read(uncore, imr);
}
bool intel_irqs_enabled(struct xe_device *xe)
{
/*
* XXX: i915 handles irq.enabled racily, since it doesn't lock its
* transitions. Because of that, irq.enabled is sometimes read without
* the irq.lock held.
* However, the most critical cases like vblank and page flips are
* properly using the locks.
* We cannot take the lock in here or run any kind of assert because
* of i915 inconsistency.
* But at this point the xe irq is better protected against races,
* although the full solution would be protecting the i915 side.
*/
return xe->irq.enabled;
}
void intel_synchronize_irq(struct xe_device *xe)
{
synchronize_irq(to_pci_dev(xe->drm.dev)->irq);
}
// SPDX-License-Identifier: MIT
/*
* Copyright © 2023 Intel Corporation
*/
#include "i915_drv.h"
bool i915_vtd_active(struct drm_i915_private *i915)
{
if (device_iommu_mapped(i915->drm.dev))
return true;
/* Running as a guest, we assume the host is enforcing VT-d */
return i915_run_as_guest();
}
/* i915 specific, just put here to shut it up */
int __i915_inject_probe_error(struct drm_i915_private *i915, int err,
const char *func, int line)
{
return 0;
}
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2021 Intel Corporation
*/
#include <drm/drm_modeset_helper.h>
#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_fb_bo.h"
void intel_fb_bo_framebuffer_fini(struct xe_bo *bo)
{
if (bo->flags & XE_BO_CREATE_PINNED_BIT) {
/* Unpin our kernel fb first */
xe_bo_lock(bo, false);
xe_bo_unpin(bo);
xe_bo_unlock(bo);
}
xe_bo_put(bo);
}
int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
struct xe_bo *bo,
struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_i915_private *i915 = to_i915(bo->ttm.base.dev);
int ret;
xe_bo_get(bo);
ret = ttm_bo_reserve(&bo->ttm, true, false, NULL);
if (ret)
return ret;
if (!(bo->flags & XE_BO_SCANOUT_BIT)) {
/*
* XE_BO_SCANOUT_BIT should ideally be set at creation, or is
* automatically set when creating FB. We cannot change caching
* mode when the object is VM_BINDed, so we can only set
* coherency with display when unbound.
*/
if (XE_IOCTL_DBG(i915, !list_empty(&bo->ttm.base.gpuva.list))) {
ttm_bo_unreserve(&bo->ttm);
return -EINVAL;
}
bo->flags |= XE_BO_SCANOUT_BIT;
}
ttm_bo_unreserve(&bo->ttm);
return ret;
}
struct xe_bo *intel_fb_bo_lookup_valid_bo(struct drm_i915_private *i915,
struct drm_file *filp,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_i915_gem_object *bo;
struct drm_gem_object *gem = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
if (!gem)
return ERR_PTR(-ENOENT);
bo = gem_to_xe_bo(gem);
/* Require vram placement or dma-buf import */
if (IS_DGFX(i915) &&
!xe_bo_can_migrate(gem_to_xe_bo(gem), XE_PL_VRAM0) &&
bo->ttm.type != ttm_bo_type_sg) {
drm_gem_object_put(gem);
return ERR_PTR(-EREMOTE);
}
return bo;
}
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2021 Intel Corporation
*/
#ifndef __INTEL_FB_BO_H__
#define __INTEL_FB_BO_H__
struct drm_file;
struct drm_mode_fb_cmd2;
struct drm_i915_private;
struct intel_framebuffer;
struct xe_bo;
void intel_fb_bo_framebuffer_fini(struct xe_bo *bo);
int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
struct xe_bo *bo,
struct drm_mode_fb_cmd2 *mode_cmd);
struct xe_bo *intel_fb_bo_lookup_valid_bo(struct drm_i915_private *i915,
struct drm_file *filp,
const struct drm_mode_fb_cmd2 *mode_cmd);
#endif
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2023 Intel Corporation
*/
#include "intel_fbdev_fb.h"
#include <drm/drm_fb_helper.h>
#include "xe_gt.h"
#include "xe_ttm_stolen_mgr.h"
#include "i915_drv.h"
#include "intel_display_types.h"
struct drm_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
struct drm_framebuffer *fb;
struct drm_device *dev = helper->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_mode_fb_cmd2 mode_cmd = {};
struct drm_i915_gem_object *obj;
int size;
/* we don't do packed 24bpp */
if (sizes->surface_bpp == 24)
sizes->surface_bpp = 32;
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
mode_cmd.pitches[0] = ALIGN(mode_cmd.width *
DIV_ROUND_UP(sizes->surface_bpp, 8), XE_PAGE_SIZE);
mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
sizes->surface_depth);
size = mode_cmd.pitches[0] * mode_cmd.height;
size = PAGE_ALIGN(size);
obj = ERR_PTR(-ENODEV);
if (!IS_DGFX(dev_priv)) {
obj = xe_bo_create_pin_map(dev_priv, xe_device_get_root_tile(dev_priv),
NULL, size,
ttm_bo_type_kernel, XE_BO_SCANOUT_BIT |
XE_BO_CREATE_STOLEN_BIT |
XE_BO_CREATE_PINNED_BIT);
if (!IS_ERR(obj))
drm_info(&dev_priv->drm, "Allocated fbdev into stolen\n");
else
drm_info(&dev_priv->drm, "Allocated fbdev into stolen failed: %li\n", PTR_ERR(obj));
}
if (IS_ERR(obj)) {
obj = xe_bo_create_pin_map(dev_priv, xe_device_get_root_tile(dev_priv), NULL, size,
ttm_bo_type_kernel, XE_BO_SCANOUT_BIT |
XE_BO_CREATE_VRAM_IF_DGFX(xe_device_get_root_tile(dev_priv)) |
XE_BO_CREATE_PINNED_BIT);
}
if (IS_ERR(obj)) {
drm_err(&dev_priv->drm, "failed to allocate framebuffer (%pe)\n", obj);
fb = ERR_PTR(-ENOMEM);
goto err;
}
fb = intel_framebuffer_create(obj, &mode_cmd);
if (IS_ERR(fb)) {
xe_bo_unpin_map_no_vm(obj);
goto err;
}
drm_gem_object_put(intel_bo_to_drm_bo(obj));
return fb;
err:
return fb;
}
int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info,
struct drm_i915_gem_object *obj, struct i915_vma *vma)
{
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
if (!(obj->flags & XE_BO_CREATE_SYSTEM_BIT)) {
if (obj->flags & XE_BO_CREATE_STOLEN_BIT)
info->fix.smem_start = xe_ttm_stolen_io_offset(obj, 0);
else
info->fix.smem_start =
pci_resource_start(pdev, 2) +
xe_bo_addr(obj, 0, XE_PAGE_SIZE);
info->fix.smem_len = obj->ttm.base.size;
} else {
/* XXX: Pure fiction, as the BO may not be physically accessible.. */
info->fix.smem_start = 0;
info->fix.smem_len = obj->ttm.base.size;
}
XE_WARN_ON(iosys_map_is_null(&obj->vmap));
info->screen_base = obj->vmap.vaddr_iomem;
info->screen_size = intel_bo_to_drm_bo(obj)->size;
return 0;
}
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2023 Intel Corporation
*/
#ifndef __INTEL_FBDEV_FB_H__
#define __INTEL_FBDEV_FB_H__
struct drm_fb_helper;
struct drm_fb_helper_surface_size;
struct drm_i915_gem_object;
struct drm_i915_private;
struct fb_info;
struct i915_vma;
struct drm_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes);
int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info,
struct drm_i915_gem_object *obj, struct i915_vma *vma);
#endif
// SPDX-License-Identifier: MIT
/*
* Copyright © 2023 Intel Corporation
*/
#include "intel_display_rps.h"
void intel_display_rps_boost_after_vblank(struct drm_crtc *crtc,
struct dma_fence *fence)
{
}
void intel_display_rps_mark_interactive(struct drm_i915_private *i915,
struct intel_atomic_state *state,
bool interactive)
{
}
// SPDX-License-Identifier: MIT
/*
* Copyright © 2021 Intel Corporation
*/
#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_dpt.h"
#include "intel_fb.h"
#include "intel_fb_pin.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include <drm/ttm/ttm_bo.h>
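/*
 * Write the DPT PTEs for one plane of a rotated view: for each column of
 * the rotated view, walk the source bo bottom-to-top, so the resulting
 * mapping presents the framebuffer rotated by 90 degrees.
 */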
static void
write_dpt_rotated(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs, u32 bo_ofs,
u32 width, u32 height, u32 src_stride, u32 dst_stride)
{
struct xe_device *xe = xe_bo_device(bo);
struct xe_ggtt *ggtt = xe_device_get_root_tile(xe)->mem.ggtt;
u32 column, row;
/* TODO: Maybe rewrite so we can traverse the bo addresses sequentially,
* by writing dpt/ggtt in a different order?
*/
for (column = 0; column < width; column++) {
u32 src_idx = src_stride * (height - 1) + column + bo_ofs;
for (row = 0; row < height; row++) {
u64 pte = ggtt->pt_ops->pte_encode_bo(bo, src_idx * XE_PAGE_SIZE,
xe->pat.idx[XE_CACHE_WB]);
iosys_map_wr(map, *dpt_ofs, u64, pte);
*dpt_ofs += 8;
src_idx -= src_stride;
}
/* The DE ignores the PTEs for the padding tiles */
*dpt_ofs += (dst_stride - height) * 8;
}
/* Align to next page */
*dpt_ofs = ALIGN(*dpt_ofs, 4096);
}
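/*
 * The DPT (display page table) holds one 8-byte PTE per 4K page of the
 * framebuffer. It is itself a bo, allocated preferably in VRAM, falling
 * back to stolen and then to system memory.
 */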
static int __xe_pin_fb_vma_dpt(struct intel_framebuffer *fb,
const struct i915_gtt_view *view,
struct i915_vma *vma)
{
struct xe_device *xe = to_xe_device(fb->base.dev);
struct xe_tile *tile0 = xe_device_get_root_tile(xe);
struct xe_ggtt *ggtt = tile0->mem.ggtt;
struct xe_bo *bo = intel_fb_obj(&fb->base), *dpt;
u32 dpt_size, size = bo->ttm.base.size;
if (view->type == I915_GTT_VIEW_NORMAL)
dpt_size = ALIGN(size / XE_PAGE_SIZE * 8, XE_PAGE_SIZE);
else
/* display uses 4K tiles instead of bytes here, convert to entries.. */
dpt_size = ALIGN(intel_rotation_info_size(&view->rotated) * 8,
XE_PAGE_SIZE);
dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size,
ttm_bo_type_kernel,
XE_BO_CREATE_VRAM0_BIT |
XE_BO_CREATE_GGTT_BIT);
if (IS_ERR(dpt))
dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size,
ttm_bo_type_kernel,
XE_BO_CREATE_STOLEN_BIT |
XE_BO_CREATE_GGTT_BIT);
if (IS_ERR(dpt))
dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size,
ttm_bo_type_kernel,
XE_BO_CREATE_SYSTEM_BIT |
XE_BO_CREATE_GGTT_BIT);
if (IS_ERR(dpt))
return PTR_ERR(dpt);
if (view->type == I915_GTT_VIEW_NORMAL) {
u32 x;
for (x = 0; x < size / XE_PAGE_SIZE; x++) {
u64 pte = ggtt->pt_ops->pte_encode_bo(bo, x * XE_PAGE_SIZE,
xe->pat.idx[XE_CACHE_WB]);
iosys_map_wr(&dpt->vmap, x * 8, u64, pte);
}
} else {
const struct intel_rotation_info *rot_info = &view->rotated;
u32 i, dpt_ofs = 0;
for (i = 0; i < ARRAY_SIZE(rot_info->plane); i++)
write_dpt_rotated(bo, &dpt->vmap, &dpt_ofs,
rot_info->plane[i].offset,
rot_info->plane[i].width,
rot_info->plane[i].height,
rot_info->plane[i].src_stride,
rot_info->plane[i].dst_stride);
}
vma->dpt = dpt;
vma->node = dpt->ggtt_node;
return 0;
}
static void
write_ggtt_rotated(struct xe_bo *bo, struct xe_ggtt *ggtt, u32 *ggtt_ofs, u32 bo_ofs,
u32 width, u32 height, u32 src_stride, u32 dst_stride)
{
struct xe_device *xe = xe_bo_device(bo);
u32 column, row;
for (column = 0; column < width; column++) {
u32 src_idx = src_stride * (height - 1) + column + bo_ofs;
for (row = 0; row < height; row++) {
u64 pte = ggtt->pt_ops->pte_encode_bo(bo, src_idx * XE_PAGE_SIZE,
xe->pat.idx[XE_CACHE_WB]);
xe_ggtt_set_pte(ggtt, *ggtt_ofs, pte);
*ggtt_ofs += XE_PAGE_SIZE;
src_idx -= src_stride;
}
/* The DE ignores the PTEs for the padding tiles */
*ggtt_ofs += (dst_stride - height) * XE_PAGE_SIZE;
}
}
static int __xe_pin_fb_vma_ggtt(struct intel_framebuffer *fb,
const struct i915_gtt_view *view,
struct i915_vma *vma)
{
struct xe_bo *bo = intel_fb_obj(&fb->base);
struct xe_device *xe = to_xe_device(fb->base.dev);
struct xe_ggtt *ggtt = xe_device_get_root_tile(xe)->mem.ggtt;
u32 align;
int ret;
/* TODO: Consider sharing framebuffer mapping?
* embed i915_vma inside intel_framebuffer
*/
xe_device_mem_access_get(tile_to_xe(ggtt->tile));
ret = mutex_lock_interruptible(&ggtt->lock);
if (ret)
goto out;
align = XE_PAGE_SIZE;
if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K)
align = max_t(u32, align, SZ_64K);
if (bo->ggtt_node.size && view->type == I915_GTT_VIEW_NORMAL) {
vma->node = bo->ggtt_node;
} else if (view->type == I915_GTT_VIEW_NORMAL) {
u32 x, size = bo->ttm.base.size;
ret = xe_ggtt_insert_special_node_locked(ggtt, &vma->node, size,
align, 0);
if (ret)
goto out_unlock;
for (x = 0; x < size; x += XE_PAGE_SIZE) {
u64 pte = ggtt->pt_ops->pte_encode_bo(bo, x,
xe->pat.idx[XE_CACHE_WB]);
xe_ggtt_set_pte(ggtt, vma->node.start + x, pte);
}
} else {
u32 i, ggtt_ofs;
const struct intel_rotation_info *rot_info = &view->rotated;
/* display seems to use tiles instead of bytes here, so convert it back.. */
u32 size = intel_rotation_info_size(rot_info) * XE_PAGE_SIZE;
ret = xe_ggtt_insert_special_node_locked(ggtt, &vma->node, size,
align, 0);
if (ret)
goto out_unlock;
ggtt_ofs = vma->node.start;
for (i = 0; i < ARRAY_SIZE(rot_info->plane); i++)
write_ggtt_rotated(bo, ggtt, &ggtt_ofs,
rot_info->plane[i].offset,
rot_info->plane[i].width,
rot_info->plane[i].height,
rot_info->plane[i].src_stride,
rot_info->plane[i].dst_stride);
}
xe_ggtt_invalidate(ggtt);
out_unlock:
mutex_unlock(&ggtt->lock);
out:
xe_device_mem_access_put(tile_to_xe(ggtt->tile));
return ret;
}
static struct i915_vma *__xe_pin_fb_vma(struct intel_framebuffer *fb,
const struct i915_gtt_view *view)
{
struct drm_device *dev = fb->base.dev;
struct xe_device *xe = to_xe_device(dev);
struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
struct xe_bo *bo = intel_fb_obj(&fb->base);
int ret;
if (!vma)
return ERR_PTR(-ENOMEM);
/* Remapped view is only required on ADL-P, which xe doesn't support. */
if (XE_WARN_ON(view->type == I915_GTT_VIEW_REMAPPED)) {
ret = -ENODEV;
goto err;
}
/*
* Pin the framebuffer, we can't use xe_bo_(un)pin functions as the
* assumptions are incorrect for framebuffers
*/
ret = ttm_bo_reserve(&bo->ttm, false, false, NULL);
if (ret)
goto err;
if (IS_DGFX(xe))
ret = xe_bo_migrate(bo, XE_PL_VRAM0);
else
ret = xe_bo_validate(bo, NULL, true);
if (!ret)
ttm_bo_pin(&bo->ttm);
ttm_bo_unreserve(&bo->ttm);
if (ret)
goto err;
vma->bo = bo;
if (intel_fb_uses_dpt(&fb->base))
ret = __xe_pin_fb_vma_dpt(fb, view, vma);
else
ret = __xe_pin_fb_vma_ggtt(fb, view, vma);
if (ret)
goto err_unpin;
return vma;
err_unpin:
ttm_bo_reserve(&bo->ttm, false, false, NULL);
ttm_bo_unpin(&bo->ttm);
ttm_bo_unreserve(&bo->ttm);
err:
kfree(vma);
return ERR_PTR(ret);
}
static void __xe_unpin_fb_vma(struct i915_vma *vma)
{
struct xe_device *xe = to_xe_device(vma->bo->ttm.base.dev);
struct xe_ggtt *ggtt = xe_device_get_root_tile(xe)->mem.ggtt;
if (vma->dpt)
xe_bo_unpin_map_no_vm(vma->dpt);
else if (!drm_mm_node_allocated(&vma->bo->ggtt_node) ||
vma->bo->ggtt_node.start != vma->node.start)
xe_ggtt_remove_node(ggtt, &vma->node);
ttm_bo_reserve(&vma->bo->ttm, false, false, NULL);
ttm_bo_unpin(&vma->bo->ttm);
ttm_bo_unreserve(&vma->bo->ttm);
kfree(vma);
}
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
bool phys_cursor,
const struct i915_gtt_view *view,
bool uses_fence,
unsigned long *out_flags)
{
*out_flags = 0;
return __xe_pin_fb_vma(to_intel_framebuffer(fb), view);
}
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
__xe_unpin_fb_vma(vma);
}
int intel_plane_pin_fb(struct intel_plane_state *plane_state)
{
struct drm_framebuffer *fb = plane_state->hw.fb;
struct xe_bo *bo = intel_fb_obj(fb);
struct i915_vma *vma;
/* We reject creating !SCANOUT fb's, so this is weird.. */
drm_WARN_ON(bo->ttm.base.dev, !(bo->flags & XE_BO_SCANOUT_BIT));
vma = __xe_pin_fb_vma(to_intel_framebuffer(fb), &plane_state->view.gtt);
if (IS_ERR(vma))
return PTR_ERR(vma);
plane_state->ggtt_vma = vma;
return 0;
}
void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
{
__xe_unpin_fb_vma(old_plane_state->ggtt_vma);
old_plane_state->ggtt_vma = NULL;
}
/*
* For Xe, introduce a dummy intel_dpt_create() which just returns NULL,
* and an intel_dpt_destroy() which does nothing.
*/
struct i915_address_space *intel_dpt_create(struct intel_framebuffer *fb)
{
return NULL;
}
void intel_dpt_destroy(struct i915_address_space *vm)
{
return;
}
\ No newline at end of file
// SPDX-License-Identifier: MIT
/*
* Copyright 2023, Intel Corporation.
*/
#include "i915_drv.h"
#include "intel_hdcp_gsc.h"
int intel_hdcp_gsc_init(struct drm_i915_private *i915)
{
drm_info(&i915->drm, "HDCP support not yet implemented\n");
return -ENODEV;
}
void intel_hdcp_gsc_fini(struct drm_i915_private *i915)
{
}
ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
size_t msg_in_len, u8 *msg_out,
size_t msg_out_len)
{
return -ENODEV;
}
// SPDX-License-Identifier: MIT
/*
* Copyright © 2021 Intel Corporation
*/
/* for ioread64 */
#include <linux/io-64-nonatomic-lo-hi.h>
#include "xe_ggtt.h"
#include "i915_drv.h"
#include "intel_atomic_plane.h"
#include "intel_display.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fb_pin.h"
#include "intel_frontbuffer.h"
#include "intel_plane_initial.h"
static bool
intel_reuse_initial_plane_obj(struct drm_i915_private *i915,
const struct intel_initial_plane_config *plane_config,
struct drm_framebuffer **fb)
{
struct intel_crtc *crtc;
for_each_intel_crtc(&i915->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane *plane =
to_intel_plane(crtc->base.primary);
struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
if (!crtc_state->uapi.active)
continue;
if (!plane_state->ggtt_vma)
continue;
if (intel_plane_ggtt_offset(plane_state) == plane_config->base) {
*fb = plane_state->hw.fb;
return true;
}
}
return false;
}
static struct xe_bo *
initial_plane_bo(struct xe_device *xe,
struct intel_initial_plane_config *plane_config)
{
struct xe_tile *tile0 = xe_device_get_root_tile(xe);
struct xe_bo *bo;
resource_size_t phys_base;
u32 base, size, flags;
u64 page_size = xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K;
if (plane_config->size == 0)
return NULL;
flags = XE_BO_CREATE_PINNED_BIT | XE_BO_SCANOUT_BIT | XE_BO_CREATE_GGTT_BIT;
base = round_down(plane_config->base, page_size);
if (IS_DGFX(xe)) {
u64 __iomem *gte = tile0->mem.ggtt->gsm;
u64 pte;
gte += base / XE_PAGE_SIZE;
pte = ioread64(gte);
if (!(pte & XE_GGTT_PTE_DM)) {
drm_err(&xe->drm,
"Initial plane programming missing DM bit\n");
return NULL;
}
phys_base = pte & ~(page_size - 1);
flags |= XE_BO_CREATE_VRAM0_BIT;
/*
* We don't currently expect this to ever be placed in the
* stolen portion.
*/
if (phys_base >= tile0->mem.vram.usable_size) {
drm_err(&xe->drm,
"Initial plane programming using invalid range, phys_base=%pa\n",
&phys_base);
return NULL;
}
drm_dbg(&xe->drm,
"Using phys_base=%pa, based on initial plane programming\n",
&phys_base);
} else {
struct ttm_resource_manager *stolen = ttm_manager_type(&xe->ttm, XE_PL_STOLEN);
if (!stolen)
return NULL;
phys_base = base;
flags |= XE_BO_CREATE_STOLEN_BIT;
/*
* If the FB is too big, just don't use it since fbdev is not very
* important and we should probably use that space with FBC or other
* features.
*/
if (IS_ENABLED(CONFIG_FRAMEBUFFER_CONSOLE) &&
plane_config->size * 2 >> PAGE_SHIFT >= stolen->size)
return NULL;
}
size = round_up(plane_config->base + plane_config->size,
page_size);
size -= base;
bo = xe_bo_create_pin_map_at(xe, tile0, NULL, size, phys_base,
ttm_bo_type_kernel, flags);
if (IS_ERR(bo)) {
drm_dbg(&xe->drm,
"Failed to create bo phys_base=%pa size %u with flags %x: %li\n",
&phys_base, size, flags, PTR_ERR(bo));
return NULL;
}
return bo;
}
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
struct intel_initial_plane_config *plane_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_mode_fb_cmd2 mode_cmd = { 0 };
struct drm_framebuffer *fb = &plane_config->fb->base;
struct xe_bo *bo;
switch (fb->modifier) {
case DRM_FORMAT_MOD_LINEAR:
case I915_FORMAT_MOD_X_TILED:
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_4_TILED:
break;
default:
drm_dbg(&dev_priv->drm,
"Unsupported modifier for initial FB: 0x%llx\n",
fb->modifier);
return false;
}
mode_cmd.pixel_format = fb->format->format;
mode_cmd.width = fb->width;
mode_cmd.height = fb->height;
mode_cmd.pitches[0] = fb->pitches[0];
mode_cmd.modifier[0] = fb->modifier;
mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
bo = initial_plane_bo(dev_priv, plane_config);
if (!bo)
return false;
if (intel_framebuffer_init(to_intel_framebuffer(fb),
bo, &mode_cmd)) {
drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n");
goto err_bo;
}
/* Reference handed over to fb */
xe_bo_put(bo);
return true;
err_bo:
xe_bo_unpin_map_no_vm(bo);
return false;
}
static void
intel_find_initial_plane_obj(struct intel_crtc *crtc,
struct intel_initial_plane_config *plane_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *plane =
to_intel_plane(crtc->base.primary);
struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct drm_framebuffer *fb;
struct i915_vma *vma;
/*
* TODO:
* Disable planes if get_initial_plane_config() failed.
* Make sure things work if the surface base is not page aligned.
*/
if (!plane_config->fb)
return;
if (intel_alloc_initial_plane_obj(crtc, plane_config))
fb = &plane_config->fb->base;
else if (!intel_reuse_initial_plane_obj(dev_priv, plane_config, &fb))
goto nofb;
plane_state->uapi.rotation = plane_config->rotation;
intel_fb_fill_view(to_intel_framebuffer(fb),
plane_state->uapi.rotation, &plane_state->view);
vma = intel_pin_and_fence_fb_obj(fb, false, &plane_state->view.gtt,
false, &plane_state->flags);
if (IS_ERR(vma))
goto nofb;
plane_state->ggtt_vma = vma;
plane_state->uapi.src_x = 0;
plane_state->uapi.src_y = 0;
plane_state->uapi.src_w = fb->width << 16;
plane_state->uapi.src_h = fb->height << 16;
plane_state->uapi.crtc_x = 0;
plane_state->uapi.crtc_y = 0;
plane_state->uapi.crtc_w = fb->width;
plane_state->uapi.crtc_h = fb->height;
plane_state->uapi.fb = fb;
drm_framebuffer_get(fb);
plane_state->uapi.crtc = &crtc->base;
intel_plane_copy_uapi_to_hw_state(plane_state, plane_state, crtc);
atomic_or(plane->frontbuffer_bit, &to_intel_frontbuffer(fb)->bits);
plane_config->vma = vma;
/*
* Flip to the newly created mapping ASAP, so we can re-use the
* first part of GGTT for WOPCM, prevent flickering, and prevent
* the lookup of sysmem scratch pages.
*/
plane->check_plane(crtc_state, plane_state);
plane->async_flip(plane, crtc_state, plane_state, true);
return;
nofb:
/*
* We've failed to reconstruct the BIOS FB. Current display state
* indicates that the primary plane is visible, but has a NULL FB,
* which will lead to problems later if we don't fix it up. The
* simplest solution is to just disable the primary plane now and
* pretend the BIOS never had it enabled.
*/
intel_plane_disable_noatomic(crtc, plane);
}
static void plane_config_fini(struct intel_initial_plane_config *plane_config)
{
if (plane_config->fb) {
struct drm_framebuffer *fb = &plane_config->fb->base;
/* We may only have the stub and not a full framebuffer */
if (drm_framebuffer_read_refcount(fb))
drm_framebuffer_put(fb);
else
kfree(fb);
}
}
void intel_crtc_initial_plane_config(struct intel_crtc *crtc)
{
struct xe_device *xe = to_xe_device(crtc->base.dev);
struct intel_initial_plane_config plane_config = {};
/*
* Note that reserving the BIOS fb up front prevents us
* from stuffing other stolen allocations like the ring
* on top. This prevents some ugliness at boot time, and
* can even allow for smooth boot transitions if the BIOS
* fb is large enough for the active pipe configuration.
*/
xe->display.funcs.display->get_initial_plane_config(crtc, &plane_config);
/*
* If the fb is shared between multiple heads, we'll
* just get the first one.
*/
intel_find_initial_plane_obj(crtc, &plane_config);
plane_config_fini(&plane_config);
}
...@@ -6,7 +6,7 @@
#ifndef _XE_REG_DEFS_H_
#define _XE_REG_DEFS_H_

#include "../../i915/i915_reg_defs.h"
#include "compat-i915-headers/i915_reg_defs.h"

/**
 * struct xe_reg - Register definition
...
...@@ -56,19 +56,6 @@
#define GU_MISC_IRQ_OFFSET 0x444f0
#define GU_MISC_GSE REG_BIT(27)
#define TRANSCODER_A_OFFSET 0x60000
#define TRANSCODER_B_OFFSET 0x61000
#define TRANSCODER_C_OFFSET 0x62000
#define TRANSCODER_D_OFFSET 0x63000
#define TRANSCODER_DSI0_OFFSET 0x6b000
#define TRANSCODER_DSI1_OFFSET 0x6b800
#define PIPE_A_OFFSET 0x70000
#define PIPE_B_OFFSET 0x71000
#define PIPE_C_OFFSET 0x72000
#define PIPE_D_OFFSET 0x73000
#define PIPE_DSI0_OFFSET 0x7b000
#define PIPE_DSI1_OFFSET 0x7b800
#define SOFTWARE_FLAGS_SPR33 XE_REG(0x4f084)

#define GU_CNTL_PROTECTED XE_REG(0x10100C)
...
...@@ -1400,9 +1400,9 @@ xe_bo_create_locked_range(struct xe_device *xe,
	xe_assert(xe, tile);

	if (flags & XE_BO_CREATE_STOLEN_BIT &&
	    flags & XE_BO_FIXED_PLACEMENT_BIT) {
		err = xe_ggtt_insert_bo_at(tile->mem.ggtt, bo, start);
	if (flags & XE_BO_FIXED_PLACEMENT_BIT) {
		err = xe_ggtt_insert_bo_at(tile->mem.ggtt, bo,
					   start + bo->size, U64_MAX);
	} else {
		err = xe_ggtt_insert_bo(tile->mem.ggtt, bo);
	}
...
...@@ -18,6 +18,7 @@
#include "regs/xe_regs.h"
#include "xe_bo.h"
#include "xe_debugfs.h"
#include "xe_display.h"
#include "xe_dma_buf.h"
#include "xe_drm_client.h"
#include "xe_drv.h"
...@@ -190,6 +191,9 @@ static void xe_device_destroy(struct drm_device *dev, void *dummy)
	if (xe->ordered_wq)
		destroy_workqueue(xe->ordered_wq);

	if (xe->unordered_wq)
		destroy_workqueue(xe->unordered_wq);

	ttm_device_fini(&xe->ttm);
}
...@@ -199,6 +203,8 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
	struct xe_device *xe;
	int err;

	xe_display_driver_set_hooks(&driver);

	err = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver);
	if (err)
		return ERR_PTR(err);
...@@ -237,14 +243,16 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
	INIT_LIST_HEAD(&xe->pinned.evicted);

	xe->ordered_wq = alloc_ordered_workqueue("xe-ordered-wq", 0);
	if (!xe->ordered_wq) {
		drm_err(&xe->drm, "Failed to allocate xe-ordered-wq\n");
	xe->unordered_wq = alloc_workqueue("xe-unordered-wq", 0, 0);
	if (!xe->ordered_wq || !xe->unordered_wq) {
		drm_err(&xe->drm, "Failed to allocate xe workqueues\n");
		err = -ENOMEM;
		goto err_put;
	}

	drmm_mutex_init(&xe->drm, &xe->sb_lock);
	xe->enabled_irq_mask = ~0;
	err = xe_display_create(xe);
	if (WARN_ON(err))
		goto err_put;

	return xe;
...@@ -346,6 +354,9 @@ int xe_device_probe(struct xe_device *xe)
	xe_pat_init_early(xe);

	xe->info.mem_region_mask = 1;

	err = xe_display_init_nommio(xe);
	if (err)
		return err;

	for_each_tile(tile, xe, id) {
		err = xe_tile_alloc(tile);
...@@ -367,10 +378,14 @@ int xe_device_probe(struct xe_device *xe)
		return err;
	}

	err = xe_irq_install(xe);
	err = xe_display_init_noirq(xe);
	if (err)
		return err;

	err = xe_irq_install(xe);
	if (err)
		goto err;

	for_each_gt(gt, xe, id) {
		err = xe_gt_init_early(gt);
		if (err)
...@@ -392,6 +407,16 @@ int xe_device_probe(struct xe_device *xe)
	/* Allocate and map stolen after potential VRAM resize */
	xe_ttm_stolen_mgr_init(xe);

	/*
	 * Now that GT is initialized (TTM in particular),
	 * we can try to init display, and inherit the initial fb.
	 * This is the reason the first allocation needs to be done
	 * inside display.
	 */
	err = xe_display_init_noaccel(xe);
	if (err)
		goto err_irq_shutdown;

	for_each_gt(gt, xe, id) {
		err = xe_gt_init(gt);
		if (err)
...@@ -400,10 +425,16 @@ int xe_device_probe(struct xe_device *xe)
	xe_heci_gsc_init(xe);

	err = xe_display_init(xe);
	if (err)
		goto err_fini_display;

	err = drm_dev_register(&xe->drm, 0);
	if (err)
		goto err_irq_shutdown;

	xe_display_register(xe);

	xe_debugfs_register(xe);
	xe_pmu_register(&xe->pmu);
...@@ -416,13 +447,30 @@ int xe_device_probe(struct xe_device *xe)
	return 0;
err_fini_display:
xe_display_driver_remove(xe);
err_irq_shutdown:
	xe_irq_shutdown(xe);
err:
	xe_display_fini(xe);
	return err;
}

static void xe_device_remove_display(struct xe_device *xe)
{
	xe_display_unregister(xe);

	drm_dev_unplug(&xe->drm);
	xe_display_driver_remove(xe);
}

void xe_device_remove(struct xe_device *xe)
{
	xe_device_remove_display(xe);

	xe_display_fini(xe);

	xe_heci_gsc_fini(xe);

	xe_irq_shutdown(xe);
...
...@@ -20,6 +20,12 @@
#include "xe_pmu.h"
#include "xe_step_types.h"

#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
#include "soc/intel_pch.h"
#include "intel_display_core.h"
#include "intel_display_device.h"
#endif

struct xe_ggtt;
struct xe_pat_ops;
...@@ -247,12 +253,20 @@ struct xe_device {
	u8 has_llc:1;
	/** @has_range_tlb_invalidation: Has range based TLB invalidations */
	u8 has_range_tlb_invalidation:1;
/** @enable_display: display enabled */
u8 enable_display:1;
	/** @bypass_mtcfg: Bypass Multi-Tile configuration from MTCFG register */
	u8 bypass_mtcfg:1;
	/** @supports_mmio_ext: supports MMIO extension/s */
	u8 supports_mmio_ext:1;
	/** @has_heci_gscfi: device has heci gscfi */
	u8 has_heci_gscfi:1;
#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
struct {
u32 rawclk_freq;
} i915_runtime;
#endif
	} info;

	/** @irq: device interrupt state */
...@@ -323,6 +337,9 @@ struct xe_device {
	/** @ordered_wq: used to serialize compute mode resume */
	struct workqueue_struct *ordered_wq;

	/** @unordered_wq: used to serialize unordered work, mostly display */
	struct workqueue_struct *unordered_wq;

	/** @tiles: device tiles */
	struct xe_tile tiles[XE_MAX_TILES_PER_DEVICE];
...@@ -391,10 +408,79 @@ struct xe_device {
	/** @needs_flr_on_fini: requests function-reset on fini */
	bool needs_flr_on_fini;
#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
/*
* Any fields below this point are the ones used by display.
 * They are temporarily added here so xe_device can be disguised as
* drm_i915_private during build. After cleanup these should go away,
* migrating to the right sub-structs
*/
struct intel_display display;
enum intel_pch pch_type;
u16 pch_id;
struct dram_info {
bool wm_lv_0_adjust_needed;
u8 num_channels;
bool symmetric_memory;
enum intel_dram_type {
INTEL_DRAM_UNKNOWN,
INTEL_DRAM_DDR3,
INTEL_DRAM_DDR4,
INTEL_DRAM_LPDDR3,
INTEL_DRAM_LPDDR4,
INTEL_DRAM_DDR5,
INTEL_DRAM_LPDDR5,
} type;
u8 num_qgv_points;
u8 num_psf_gv_points;
} dram_info;
/*
* edram size in MB.
* Cannot be determined by PCIID. You must always read a register.
*/
u32 edram_size_mb;
/* To shut up runtime pm macros.. */
struct xe_runtime_pm {} runtime_pm;
	/* For pcode */
	struct mutex sb_lock;
/* Should be in struct intel_display */
u32 skl_preferred_vco_freq, max_dotclk_freq, hti_state;
u8 snps_phy_failed_calibration;
struct drm_atomic_state *modeset_restore_state;
struct list_head global_obj_list;
union {
/* only to allow build, not used functionally */
u32 irq_mask;
u32 de_irq_mask[I915_MAX_PIPES];
};
u32 pipestat_irq_mask[I915_MAX_PIPES];
bool display_irqs_enabled;
	u32 enabled_irq_mask;
struct intel_uncore {
spinlock_t lock;
} uncore;
/* only to allow build, not used functionally */
struct {
unsigned int hpll_freq;
unsigned int czclk_freq;
unsigned int fsb_freq, mem_freq, is_ddr3;
u8 vblank_enabled;
};
struct {
const char *dmc_firmware_path;
} params;
void *pxp;
#endif
};

/**
...
// SPDX-License-Identifier: MIT
/*
* Copyright © 2023 Intel Corporation
*/
#include "xe_display.h"
#include "regs/xe_regs.h"
#include <linux/fb.h>
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
#include <drm/xe_drm.h>
#include "soc/intel_dram.h"
#include "i915_drv.h" /* FIXME: HAS_DISPLAY() depends on this */
#include "intel_acpi.h"
#include "intel_audio.h"
#include "intel_bw.h"
#include "intel_display.h"
#include "intel_display_driver.h"
#include "intel_display_irq.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dp.h"
#include "intel_fbdev.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_opregion.h"
#include "xe_module.h"
/* Xe device functions */
static bool has_display(struct xe_device *xe)
{
return HAS_DISPLAY(xe);
}
/**
* xe_display_driver_probe_defer - Detect if we need to wait for other drivers
* early on
* @pdev: PCI device
*
* Returns: true if probe needs to be deferred, false otherwise
*/
bool xe_display_driver_probe_defer(struct pci_dev *pdev)
{
	if (!enable_display)
		return false;
return intel_display_driver_probe_defer(pdev);
}
static void xe_display_last_close(struct drm_device *dev)
{
struct xe_device *xe = to_xe_device(dev);
	if (xe->info.enable_display)
		intel_fbdev_restore_mode(xe);
}
/**
* xe_display_driver_set_hooks - Add driver flags and hooks for display
* @driver: DRM device driver
*
* Set features and function hooks in @driver that are needed for driving the
 * display IP. This sets the driver's capability of driving display, regardless
 * of whether the device has it enabled.
*/
void xe_display_driver_set_hooks(struct drm_driver *driver)
{
if (!enable_display)
return;
driver->driver_features |= DRIVER_MODESET | DRIVER_ATOMIC;
driver->lastclose = xe_display_last_close;
}
static void unset_display_features(struct xe_device *xe)
{
xe->drm.driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
}
static void display_destroy(struct drm_device *dev, void *dummy)
{
struct xe_device *xe = to_xe_device(dev);
destroy_workqueue(xe->display.hotplug.dp_wq);
}
/**
* xe_display_create - create display struct
* @xe: XE device instance
*
* Initialize all fields used by the display part.
*
* TODO: once everything can be inside a single struct, make the struct opaque
* to the rest of xe and return it to be xe->display.
*
* Returns: 0 on success
*/
int xe_display_create(struct xe_device *xe)
{
int err;
spin_lock_init(&xe->display.fb_tracking.lock);
xe->display.hotplug.dp_wq = alloc_ordered_workqueue("xe-dp", 0);
drmm_mutex_init(&xe->drm, &xe->sb_lock);
drmm_mutex_init(&xe->drm, &xe->display.backlight.lock);
drmm_mutex_init(&xe->drm, &xe->display.audio.mutex);
drmm_mutex_init(&xe->drm, &xe->display.wm.wm_mutex);
drmm_mutex_init(&xe->drm, &xe->display.pps.mutex);
drmm_mutex_init(&xe->drm, &xe->display.hdcp.hdcp_mutex);
xe->enabled_irq_mask = ~0;
err = drmm_add_action_or_reset(&xe->drm, display_destroy, NULL);
if (err)
return err;
return 0;
}
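
xe_display_create() leans on DRM's managed-release helpers: drmm_mutex_init() ties mutex teardown to the drm_device, and drmm_add_action_or_reset() arranges for display_destroy() to run when the device is released (or immediately, if registering the action fails). A minimal sketch of that same pattern, with hypothetical names and assuming a kernel context, not code from this commit:

#include <drm/drm_device.h>
#include <drm/drm_managed.h>
#include <linux/workqueue.h>

/* Hypothetical example_create/example_destroy, sketching the pattern only. */
static void example_destroy(struct drm_device *dev, void *arg)
{
	struct workqueue_struct *wq = arg;

	destroy_workqueue(wq);
}

static int example_create(struct drm_device *drm)
{
	struct workqueue_struct *wq = alloc_ordered_workqueue("example-wq", 0);

	if (!wq)
		return -ENOMEM;

	/* On failure, runs example_destroy(drm, wq) before returning. */
	return drmm_add_action_or_reset(drm, example_destroy, wq);
}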
static void xe_display_fini_nommio(struct drm_device *dev, void *dummy)
{
struct xe_device *xe = to_xe_device(dev);
if (!xe->info.enable_display)
return;
intel_power_domains_cleanup(xe);
}
int xe_display_init_nommio(struct xe_device *xe)
{
int err;
if (!xe->info.enable_display)
return 0;
/* Fake uncore lock */
spin_lock_init(&xe->uncore.lock);
/* This must be called before any calls to HAS_PCH_* */
intel_detect_pch(xe);
err = intel_power_domains_init(xe);
if (err)
return err;
return drmm_add_action_or_reset(&xe->drm, xe_display_fini_nommio, xe);
}
static void xe_display_fini_noirq(struct drm_device *dev, void *dummy)
{
struct xe_device *xe = to_xe_device(dev);
if (!xe->info.enable_display)
return;
intel_display_driver_remove_noirq(xe);
intel_power_domains_driver_remove(xe);
}
int xe_display_init_noirq(struct xe_device *xe)
{
int err;
if (!xe->info.enable_display)
return 0;
intel_display_driver_early_probe(xe);
/* Early display init.. */
intel_opregion_setup(xe);
/*
* Fill the dram structure to get the system dram info. This will be
* used for memory latency calculation.
*/
intel_dram_detect(xe);
intel_bw_init_hw(xe);
intel_display_device_info_runtime_init(xe);
err = intel_display_driver_probe_noirq(xe);
if (err)
return err;
return drmm_add_action_or_reset(&xe->drm, xe_display_fini_noirq, NULL);
}
static void xe_display_fini_noaccel(struct drm_device *dev, void *dummy)
{
struct xe_device *xe = to_xe_device(dev);
if (!xe->info.enable_display)
return;
intel_display_driver_remove_nogem(xe);
}
int xe_display_init_noaccel(struct xe_device *xe)
{
int err;
if (!xe->info.enable_display)
return 0;
err = intel_display_driver_probe_nogem(xe);
if (err)
return err;
return drmm_add_action_or_reset(&xe->drm, xe_display_fini_noaccel, NULL);
}
int xe_display_init(struct xe_device *xe)
{
if (!xe->info.enable_display)
return 0;
return intel_display_driver_probe(xe);
}
void xe_display_fini(struct xe_device *xe)
{
if (!xe->info.enable_display)
return;
/* poll work can call into fbdev, hence clean that up afterwards */
intel_hpd_poll_fini(xe);
intel_fbdev_fini(xe);
intel_hdcp_component_fini(xe);
intel_audio_deinit(xe);
}
void xe_display_register(struct xe_device *xe)
{
if (!xe->info.enable_display)
return;
intel_display_driver_register(xe);
intel_register_dsm_handler();
intel_power_domains_enable(xe);
}
void xe_display_unregister(struct xe_device *xe)
{
if (!xe->info.enable_display)
return;
intel_unregister_dsm_handler();
intel_power_domains_disable(xe);
intel_display_driver_unregister(xe);
}
void xe_display_driver_remove(struct xe_device *xe)
{
if (!xe->info.enable_display)
return;
intel_display_driver_remove(xe);
intel_display_device_remove(xe);
}
/* IRQ-related functions */
void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl)
{
if (!xe->info.enable_display)
return;
if (master_ctl & DISPLAY_IRQ)
gen11_display_irq_handler(xe);
}
void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir)
{
if (!xe->info.enable_display)
return;
if (gu_misc_iir & GU_MISC_GSE)
intel_opregion_asle_intr(xe);
}
void xe_display_irq_reset(struct xe_device *xe)
{
if (!xe->info.enable_display)
return;
gen11_display_irq_reset(xe);
}
void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt)
{
if (!xe->info.enable_display)
return;
if (gt->info.id == XE_GT0)
gen11_de_irq_postinstall(xe);
}
static void intel_suspend_encoders(struct xe_device *xe)
{
struct drm_device *dev = &xe->drm;
struct intel_encoder *encoder;
	if (!has_display(xe))
		return;
drm_modeset_lock_all(dev);
for_each_intel_encoder(dev, encoder)
if (encoder->suspend)
encoder->suspend(encoder);
drm_modeset_unlock_all(dev);
}
void xe_display_pm_suspend(struct xe_device *xe)
{
if (!xe->info.enable_display)
return;
/*
* We do a lot of poking in a lot of registers, make sure they work
* properly.
*/
intel_power_domains_disable(xe);
if (has_display(xe))
drm_kms_helper_poll_disable(&xe->drm);
intel_display_driver_suspend(xe);
intel_dp_mst_suspend(xe);
intel_hpd_cancel_work(xe);
intel_suspend_encoders(xe);
intel_opregion_suspend(xe, PCI_D3cold);
intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_SUSPENDED, true);
intel_dmc_suspend(xe);
}
void xe_display_pm_suspend_late(struct xe_device *xe)
{
if (!xe->info.enable_display)
return;
intel_power_domains_suspend(xe, I915_DRM_SUSPEND_MEM);
intel_display_power_suspend_late(xe);
}
void xe_display_pm_resume_early(struct xe_device *xe)
{
if (!xe->info.enable_display)
return;
intel_display_power_resume_early(xe);
intel_power_domains_resume(xe);
}
void xe_display_pm_resume(struct xe_device *xe)
{
if (!xe->info.enable_display)
return;
intel_dmc_resume(xe);
if (has_display(xe))
drm_mode_config_reset(&xe->drm);
intel_display_driver_init_hw(xe);
intel_hpd_init(xe);
/* MST sideband requires HPD interrupts enabled */
intel_dp_mst_resume(xe);
intel_display_driver_resume(xe);
intel_hpd_poll_disable(xe);
if (has_display(xe))
drm_kms_helper_poll_enable(&xe->drm);
intel_opregion_resume(xe);
intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_RUNNING, false);
intel_power_domains_enable(xe);
}
void xe_display_probe(struct xe_device *xe)
{
if (!xe->info.enable_display)
goto no_display;
intel_display_device_probe(xe);
if (has_display(xe))
return;
no_display:
xe->info.enable_display = false;
unset_display_features(xe);
}
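
xe_display_probe() is the runtime counterpart of the Kconfig and module switches: if no display IP is found (or display is disabled), it clears xe->info.enable_display and strips DRIVER_MODESET and DRIVER_ATOMIC so the device registers as render-only. A toy, self-contained model of that feature stripping (bit values and names are illustrative, not DRM's):

#include <stdio.h>

#define TOY_DRIVER_MODESET	(1u << 0)	/* illustrative values */
#define TOY_DRIVER_ATOMIC	(1u << 1)

struct toy_drm {
	unsigned int driver_features;
};

/*
 * Mirrors the shape of xe_display_probe()/unset_display_features(): when
 * no display IP is found, drop the KMS feature bits so the device ends
 * up render-only.
 */
static void toy_probe_display(struct toy_drm *drm, int found)
{
	if (!found)
		drm->driver_features &= ~(TOY_DRIVER_MODESET | TOY_DRIVER_ATOMIC);
}

int main(void)
{
	struct toy_drm drm = {
		.driver_features = TOY_DRIVER_MODESET | TOY_DRIVER_ATOMIC,
	};

	toy_probe_display(&drm, 0);
	printf("features=%#x\n", drm.driver_features);
	return 0;
}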
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2023 Intel Corporation
*/
#ifndef _XE_DISPLAY_H_
#define _XE_DISPLAY_H_
#include "xe_device.h"
struct drm_driver;
#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
bool xe_display_driver_probe_defer(struct pci_dev *pdev);
void xe_display_driver_set_hooks(struct drm_driver *driver);
void xe_display_driver_remove(struct xe_device *xe);
int xe_display_create(struct xe_device *xe);
void xe_display_probe(struct xe_device *xe);
int xe_display_init_nommio(struct xe_device *xe);
int xe_display_init_noirq(struct xe_device *xe);
int xe_display_init_noaccel(struct xe_device *xe);
int xe_display_init(struct xe_device *xe);
void xe_display_fini(struct xe_device *xe);
void xe_display_register(struct xe_device *xe);
void xe_display_unregister(struct xe_device *xe);
void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl);
void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir);
void xe_display_irq_reset(struct xe_device *xe);
void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt);
void xe_display_pm_suspend(struct xe_device *xe);
void xe_display_pm_suspend_late(struct xe_device *xe);
void xe_display_pm_resume_early(struct xe_device *xe);
void xe_display_pm_resume(struct xe_device *xe);
#else
static inline bool xe_display_driver_probe_defer(struct pci_dev *pdev) { return false; }
static inline void xe_display_driver_set_hooks(struct drm_driver *driver) { }
static inline void xe_display_driver_remove(struct xe_device *xe) {}
static inline int xe_display_create(struct xe_device *xe) { return 0; }
static inline void xe_display_probe(struct xe_device *xe) { }
static inline int xe_display_init_nommio(struct xe_device *xe) { return 0; }
static inline int xe_display_init_noirq(struct xe_device *xe) { return 0; }
static inline int xe_display_init_noaccel(struct xe_device *xe) { return 0; }
static inline int xe_display_init(struct xe_device *xe) { return 0; }
static inline void xe_display_fini(struct xe_device *xe) {}
static inline void xe_display_register(struct xe_device *xe) {}
static inline void xe_display_unregister(struct xe_device *xe) {}
static inline void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl) {}
static inline void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir) {}
static inline void xe_display_irq_reset(struct xe_device *xe) {}
static inline void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt) {}
static inline void xe_display_pm_suspend(struct xe_device *xe) {}
static inline void xe_display_pm_suspend_late(struct xe_device *xe) {}
static inline void xe_display_pm_resume_early(struct xe_device *xe) {}
static inline void xe_display_pm_resume(struct xe_device *xe) {}
#endif /* CONFIG_DRM_XE_DISPLAY */
#endif /* _XE_DISPLAY_H_ */
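
The #else branch keeps every entry point callable when CONFIG_DRM_XE_DISPLAY is off, so xe_device.c, xe_irq.c and xe_pm.c need no #ifdefs of their own. A toy standalone demo of the same stub technique (names hypothetical):

#include <stdio.h>

#define HAVE_DISPLAY 1	/* flip to 0: the caller below still compiles */

#if HAVE_DISPLAY
static int display_init(void) { printf("display up\n"); return 0; }
#else
static inline int display_init(void) { return 0; }
#endif

int main(void)
{
	return display_init();	/* no #ifdef needed at the call site */
}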
...@@ -338,9 +338,13 @@ void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
}

static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
				  u64 start, u64 end, u64 alignment)
				  u64 start, u64 end)
{
	int err;
	u64 alignment = XE_PAGE_SIZE;

	if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K)
		alignment = SZ_64K;

	if (XE_WARN_ON(bo->ggtt_node.size)) {
		/* Someone's already inserted this BO in the GGTT */
...@@ -364,26 +368,15 @@ static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
	return err;
}
int xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo, u64 ofs)
int xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
			 u64 start, u64 end)
{
	if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K) {
		if (XE_WARN_ON(!IS_ALIGNED(ofs, SZ_64K)) ||
		    XE_WARN_ON(!IS_ALIGNED(bo->size, SZ_64K)))
			return -EINVAL;
	}

	return __xe_ggtt_insert_bo_at(ggtt, bo, ofs, ofs + bo->size, 0);
	return __xe_ggtt_insert_bo_at(ggtt, bo, start, end);
}

int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
{
	u64 alignment;

	alignment = XE_PAGE_SIZE;
	if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K)
		alignment = SZ_64K;

	return __xe_ggtt_insert_bo_at(ggtt, bo, 0, U64_MAX, alignment);
	return __xe_ggtt_insert_bo_at(ggtt, bo, 0, U64_MAX);
}

void xe_ggtt_remove_node(struct xe_ggtt *ggtt, struct drm_mm_node *node)
...
...@@ -24,7 +24,8 @@ int xe_ggtt_insert_special_node_locked(struct xe_ggtt *ggtt,
void xe_ggtt_remove_node(struct xe_ggtt *ggtt, struct drm_mm_node *node);
void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo);

int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo);
int xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo, u64 ofs);
int xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
			 u64 start, u64 end);
void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo);

int xe_ggtt_dump(struct xe_ggtt *ggtt, struct drm_printer *p);
...
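
The header change replaces the old single-offset variant with an explicit placement window for drm_mm. Callers that want the old fixed-offset behaviour can pass a window exactly the size of the BO; a hypothetical kernel-context helper, sketched under the assumption that a window equal to the BO size leaves drm_mm only one possible placement:

/* Hypothetical helper, not part of the commit: recover the removed
 * fixed-offset semantics on top of the new range-based API. */
static int ggtt_insert_bo_fixed(struct xe_ggtt *ggtt, struct xe_bo *bo, u64 ofs)
{
	return xe_ggtt_insert_bo_at(ggtt, bo, ofs, ofs + bo->size);
}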
...@@ -12,6 +12,7 @@
#include "regs/xe_gt_regs.h"
#include "regs/xe_regs.h"
#include "xe_device.h"
#include "xe_display.h"
#include "xe_drv.h"
#include "xe_gt.h"
#include "xe_guc.h"
...@@ -351,10 +352,14 @@ static irqreturn_t xelp_irq_handler(int irq, void *arg)
	gt_irq_handler(tile, master_ctl, intr_dw, identity);

	xe_display_irq_handler(xe, master_ctl);

	gu_misc_iir = gu_misc_irq_ack(xe, master_ctl);

	xelp_intr_enable(xe, false);

	xe_display_irq_enable(xe, gu_misc_iir);

	xe_pmu_irq_stats(xe);

	return IRQ_HANDLED;
...@@ -444,11 +449,14 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg)
		 * that get reported as Gunit GSE) would only be hooked up to
		 * the primary tile.
		 */
		if (id == 0)
		if (id == 0) {
			xe_display_irq_handler(xe, master_ctl);
			gu_misc_iir = gu_misc_irq_ack(xe, master_ctl);
		}
	}

	dg1_intr_enable(xe, false);
	xe_display_irq_enable(xe, gu_misc_iir);

	xe_pmu_irq_stats(xe);
...@@ -542,6 +550,7 @@ static void xe_irq_reset(struct xe_device *xe)
	tile = xe_device_get_root_tile(xe);
	mask_and_disable(tile, GU_MISC_IRQ_OFFSET);
	xe_display_irq_reset(xe);

	/*
	 * The tile's top-level status register should be the last one
...@@ -556,6 +565,8 @@ static void xe_irq_reset(struct xe_device *xe)
static void xe_irq_postinstall(struct xe_device *xe)
{
	xe_display_irq_postinstall(xe, xe_root_mmio_gt(xe));

	/*
	 * ASLE backlight operations are reported via GUnit GSE interrupts
	 * on the root tile.
...
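
The interrupt changes splice display into the existing top-level fan-out: GT sources are handled first, then the display block when its master-control bit is set, then the GU MISC read that carries the GSE/OpRegion event, which is acted on only after interrupts are re-enabled. A toy model of that dispatch order (bit positions are made up; only the ordering mirrors the handlers above):

#include <stdint.h>
#include <stdio.h>

#define FAKE_GT_IRQ		(1u << 0)	/* invented bit positions */
#define FAKE_DISPLAY_IRQ	(1u << 16)
#define FAKE_GU_MISC_GSE	(1u << 27)

/* Placeholder dispatch; only the GT -> display -> GU MISC order is real. */
static void toy_dispatch(uint32_t master_ctl, uint32_t gu_misc_iir)
{
	if (master_ctl & FAKE_GT_IRQ)
		printf("gt_irq_handler()\n");
	if (master_ctl & FAKE_DISPLAY_IRQ)
		printf("display_irq_handler()\n");
	if (gu_misc_iir & FAKE_GU_MISC_GSE)
		printf("opregion ASLE event\n");
}

int main(void)
{
	toy_dispatch(FAKE_GT_IRQ | FAKE_DISPLAY_IRQ, FAKE_GU_MISC_GSE);
	return 0;
}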
...@@ -19,6 +19,10 @@ bool force_execlist = false;
module_param_named_unsafe(force_execlist, force_execlist, bool, 0444);
MODULE_PARM_DESC(force_execlist, "Force Execlist submission");

bool enable_display = true;
module_param_named(enable_display, enable_display, bool, 0444);
MODULE_PARM_DESC(enable_display, "Enable display");

u32 xe_force_vram_bar_size;
module_param_named(vram_bar_size, xe_force_vram_bar_size, uint, 0600);
MODULE_PARM_DESC(vram_bar_size, "Set the vram bar size(in MiB)");
...
...@@ -17,6 +17,7 @@
#include "regs/xe_regs.h"
#include "regs/xe_gt_regs.h"
#include "xe_device.h"
#include "xe_display.h"
#include "xe_drv.h"
#include "xe_gt.h"
#include "xe_macros.h"
...@@ -55,6 +56,7 @@ struct xe_device_desc {
	u8 require_force_probe:1;
	u8 is_dgfx:1;

	u8 has_display:1;
	u8 has_heci_gscfi:1;
	u8 has_llc:1;
...@@ -62,6 +64,9 @@ struct xe_device_desc {
	u8 supports_mmio_ext:1;
};

__diag_push();
__diag_ignore_all("-Woverride-init", "Allow field overrides in table");

#define PLATFORM(x) \
	.platform = (x), \
	.platform_name = #x
...@@ -205,7 +210,8 @@ static const struct xe_device_desc tgl_desc = {
	.graphics = &graphics_xelp,
	.media = &media_xem,
	PLATFORM(XE_TIGERLAKE),
	.has_llc = 1,
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
};
...@@ -213,6 +219,7 @@ static const struct xe_device_desc rkl_desc = {
	.graphics = &graphics_xelp,
	.media = &media_xem,
	PLATFORM(XE_ROCKETLAKE),
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
};
...@@ -223,7 +230,8 @@ static const struct xe_device_desc adl_s_desc = {
	.graphics = &graphics_xelp,
	.media = &media_xem,
	PLATFORM(XE_ALDERLAKE_S),
	.has_llc = 1,
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
	.subplatforms = (const struct xe_subplatform_desc[]) {
		{ XE_SUBPLATFORM_ALDERLAKE_S_RPLS, "RPLS", adls_rpls_ids },
...@@ -237,7 +245,8 @@ static const struct xe_device_desc adl_p_desc = {
	.graphics = &graphics_xelp,
	.media = &media_xem,
	PLATFORM(XE_ALDERLAKE_P),
	.has_llc = 1,
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
	.subplatforms = (const struct xe_subplatform_desc[]) {
		{ XE_SUBPLATFORM_ALDERLAKE_P_RPLU, "RPLU", adlp_rplu_ids },
...@@ -249,7 +258,8 @@ static const struct xe_device_desc adl_n_desc = {
	.graphics = &graphics_xelp,
	.media = &media_xem,
	PLATFORM(XE_ALDERLAKE_N),
	.has_llc = 1,
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
};
...@@ -261,6 +271,7 @@ static const struct xe_device_desc dg1_desc = {
	.media = &media_xem,
	DGFX_FEATURES,
	PLATFORM(XE_DG1),
	.has_display = true,
	.require_force_probe = true,
	.has_heci_gscfi = 1,
};
...@@ -286,6 +297,7 @@ static const struct xe_device_desc ats_m_desc = {
	.require_force_probe = true,

	DG2_FEATURES,
	.has_display = false,
};

static const struct xe_device_desc dg2_desc = {
...@@ -294,12 +306,14 @@ static const struct xe_device_desc dg2_desc = {
	.require_force_probe = true,

	DG2_FEATURES,
	.has_display = true,
};

static const __maybe_unused struct xe_device_desc pvc_desc = {
	.graphics = &graphics_xehpc,
	DGFX_FEATURES,
	PLATFORM(XE_PVC),
	.has_display = false,
	.require_force_probe = true,
	.has_heci_gscfi = 1,
};
...@@ -308,6 +322,7 @@ static const struct xe_device_desc mtl_desc = {
	/* .graphics and .media determined via GMD_ID */
	.require_force_probe = true,
	PLATFORM(XE_METEORLAKE),
	.has_display = true,
};

static const struct xe_device_desc lnl_desc = {
...@@ -316,6 +331,7 @@ static const struct xe_device_desc lnl_desc = {
};

#undef PLATFORM
__diag_pop();

/* Map of GMD_ID values to graphics IP */
static struct gmdid_map graphics_ip_map[] = {
...@@ -574,6 +590,9 @@ static int xe_info_init(struct xe_device *xe,
	xe->info.has_flat_ccs = graphics_desc->has_flat_ccs;
	xe->info.has_range_tlb_invalidation = graphics_desc->has_range_tlb_invalidation;

	xe->info.enable_display = IS_ENABLED(CONFIG_DRM_XE_DISPLAY) &&
				  enable_display &&
				  desc->has_display;
	/*
	 * All platforms have at least one primary GT. Any platform with media
	 * version 13 or higher has an additional dedicated media GT. And
...@@ -668,6 +687,9 @@ static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
		return -ENODEV;
	}

	if (xe_display_driver_probe_defer(pdev))
		return -EPROBE_DEFER;

	xe = xe_device_create(pdev, ent);
	if (IS_ERR(xe))
		return PTR_ERR(xe);
...@@ -686,7 +708,9 @@ static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
	if (err)
		goto err_pci_disable;

	xe_display_probe(xe);

	drm_dbg(&xe->drm, "%s %s %04x:%04x dgfx:%d gfx:%s (%d.%02d) media:%s (%d.%02d) dma_m_s:%d tc:%d gscfi:%d",
	drm_dbg(&xe->drm, "%s %s %04x:%04x dgfx:%d gfx:%s (%d.%02d) media:%s (%d.%02d) display:%s dma_m_s:%d tc:%d gscfi:%d",
		desc->platform_name,
		subplatform_desc ? subplatform_desc->name : "",
		xe->info.devid, xe->info.revid,
...@@ -697,6 +721,7 @@ static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
		xe->info.media_name,
		xe->info.media_verx100 / 100,
		xe->info.media_verx100 % 100,
		str_yes_no(xe->info.enable_display),
		xe->info.dma_mask_size, xe->info.tile_count,
		xe->info.has_heci_gscfi);
...
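
The __diag_push()/__diag_ignore_all("-Woverride-init", ...) pair exists because the descriptor table builds entries from shared macros (DGFX_FEATURES, DG2_FEATURES, PLATFORM()) and individual entries may then re-set a field such a macro already initialized. C guarantees the last initializer for a member wins, but GCC flags the repeat under -Woverride-init. A standalone toy showing the pattern (macro and fields invented for illustration):

#include <stdio.h>

struct toy_desc {
	int is_dgfx;
	int has_display;
};

/* Invented macro filling defaults, the way the FEATURES macros do. */
#define TOY_FEATURES	.is_dgfx = 1, .has_display = 1

/*
 * The override below is intended; the last initializer wins, but GCC
 * warns under -Woverride-init, hence the __diag_ignore_all() above.
 */
static const struct toy_desc ats_m_like = {
	TOY_FEATURES,
	.has_display = 0,
};

int main(void)
{
	printf("dgfx=%d display=%d\n",
	       ats_m_like.is_dgfx, ats_m_like.has_display);
	return 0;
}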
...@@ -14,6 +14,7 @@
#include "xe_bo_evict.h"
#include "xe_device.h"
#include "xe_device_sysfs.h"
#include "xe_display.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_guc.h"
...@@ -61,14 +62,20 @@ int xe_pm_suspend(struct xe_device *xe)
	if (err)
		return err;

	xe_display_pm_suspend(xe);

	for_each_gt(gt, xe, id) {
		err = xe_gt_suspend(gt);
		if (err)
		if (err) {
			xe_display_pm_resume(xe);
			return err;
		}
	}

	xe_irq_suspend(xe);

	xe_display_pm_suspend_late(xe);

	return 0;
}
...@@ -94,6 +101,8 @@ int xe_pm_resume(struct xe_device *xe)
			return err;
	}

	xe_display_pm_resume_early(xe);

	/*
	 * This only restores pinned memory which is the memory required for the
	 * GT(s) to resume.
...@@ -104,6 +113,8 @@ int xe_pm_resume(struct xe_device *xe)
	xe_irq_resume(xe);

	xe_display_pm_resume(xe);

	for_each_gt(gt, xe, id)
		xe_gt_resume(gt);
...