Commit 8573df34 authored by Daniel Vetter

Merge tag 'drm-misc-next-fixes-2023-02-09' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

Short summary of fixes pull:

Contains a number of fixes to vc4 and ivpu. The patches to the probe
helpers were cherry-picked from the regular development branch.
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
From: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://patchwork.freedesktop.org/patch/msgid/Y+S6HBmaRJNPYiBG@linux-uq9g
parents 48075a66 467fbc77
@@ -16,6 +16,10 @@ properties:
compatible:
const: visionox,vtdr6130
reg:
maxItems: 1
description: DSI virtual channel
vddio-supply: true
vci-supply: true
vdd-supply: true
@@ -26,6 +30,7 @@ additionalProperties: false
required:
- compatible
- reg
- vddio-supply
- vci-supply
- vdd-supply
@@ -35,18 +40,23 @@ required:
examples:
- |
#include <dt-bindings/gpio/gpio.h>
panel {
compatible = "visionox,vtdr6130";
vddio-supply = <&vreg_l12b_1p8>;
vci-supply = <&vreg_l13b_3p0>;
vdd-supply = <&vreg_l11b_1p2>;
reset-gpios = <&tlmm 133 GPIO_ACTIVE_LOW>;
port {
panel0_in: endpoint {
remote-endpoint = <&dsi0_out>;
dsi {
#address-cells = <1>;
#size-cells = <0>;
panel@0 {
compatible = "visionox,vtdr6130";
reg = <0>;
vddio-supply = <&vreg_l12b_1p8>;
vci-supply = <&vreg_l13b_3p0>;
vdd-supply = <&vreg_l11b_1p2>;
reset-gpios = <&tlmm 133 GPIO_ACTIVE_LOW>;
port {
panel0_in: endpoint {
remote-endpoint = <&dsi0_out>;
};
};
};
};
......
@@ -90,6 +90,7 @@ static void file_priv_release(struct kref *ref)
ivpu_cmdq_release_all(file_priv);
ivpu_bo_remove_all_bos_from_context(&file_priv->ctx);
ivpu_jsm_context_release(vdev, file_priv->ctx.id);
ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
drm_WARN_ON(&vdev->drm, xa_erase_irq(&vdev->context_xa, file_priv->ctx.id) != file_priv);
mutex_destroy(&file_priv->lock);
@@ -427,6 +428,7 @@ static int ivpu_pci_init(struct ivpu_device *vdev)
ivpu_err(vdev, "Failed to set DMA mask: %d\n", ret);
return ret;
}
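	/* Lift the default 64 KiB DMA segment size limit so imported buffers
	 * can be mapped in large contiguous segments.
	 */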
dma_set_max_seg_size(vdev->drm.dev, UINT_MAX);
/* Clear any pending errors */
pcie_capability_clear_word(pdev, PCI_EXP_DEVSTA, 0x3f);
......
@@ -32,10 +32,11 @@
#define ADDR_TO_L2_CACHE_CFG(addr) ((addr) >> 31)
#define IVPU_FW_CHECK_API(vdev, fw_hdr, name) ivpu_fw_check_api(vdev, fw_hdr, #name, \
VPU_##name##_API_VER_INDEX, \
VPU_##name##_API_VER_MAJOR, \
VPU_##name##_API_VER_MINOR)
#define IVPU_FW_CHECK_API(vdev, fw_hdr, name, min_major) \
ivpu_fw_check_api(vdev, fw_hdr, #name, \
VPU_##name##_API_VER_INDEX, \
VPU_##name##_API_VER_MAJOR, \
VPU_##name##_API_VER_MINOR, min_major)
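/*
 * For illustration only (not part of the patch): after token pasting,
 * IVPU_FW_CHECK_API(vdev, fw_hdr, BOOT, 3) expands to
 *
 *	ivpu_fw_check_api(vdev, fw_hdr, "BOOT",
 *			  VPU_BOOT_API_VER_INDEX,
 *			  VPU_BOOT_API_VER_MAJOR,
 *			  VPU_BOOT_API_VER_MINOR, 3);
 */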
static char *ivpu_firmware;
module_param_named_unsafe(firmware, ivpu_firmware, charp, 0644);
@@ -63,19 +64,27 @@ static int ivpu_fw_request(struct ivpu_device *vdev)
return ret;
}
static void
static int
ivpu_fw_check_api(struct ivpu_device *vdev, const struct vpu_firmware_header *fw_hdr,
const char *str, int index, u16 expected_major, u16 expected_minor)
const char *str, int index, u16 expected_major, u16 expected_minor,
u16 min_major)
{
u16 major = (u16)(fw_hdr->api_version[index] >> 16);
u16 minor = (u16)(fw_hdr->api_version[index]);
if (major < min_major) {
ivpu_err(vdev, "Incompatible FW %s API version: %d.%d, required %d.0 or later\n",
str, major, minor, min_major);
return -EINVAL;
}
if (major != expected_major) {
ivpu_warn(vdev, "Incompatible FW %s API version: %d.%d (expected %d.%d)\n",
ivpu_warn(vdev, "Major FW %s API version different: %d.%d (expected %d.%d)\n",
str, major, minor, expected_major, expected_minor);
}
ivpu_dbg(vdev, FW_BOOT, "FW %s API version: %d.%d (expected %d.%d)\n",
str, major, minor, expected_major, expected_minor);
return 0;
}
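/*
 * Worked example (illustrative): with min_major = 3, a firmware reporting
 * API version 2.10 now fails with -EINVAL, whereas a firmware reporting,
 * say, 4.0 against an expected 3.0 only logs the major-version warning
 * and boot continues.
 */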
static int ivpu_fw_parse(struct ivpu_device *vdev)
@@ -131,6 +140,14 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
ivpu_err(vdev, "Invalid entry point: 0x%llx\n", fw_hdr->entry_point);
return -EINVAL;
}
ivpu_dbg(vdev, FW_BOOT, "Header version: 0x%x, format 0x%x\n",
fw_hdr->header_version, fw_hdr->image_format);
ivpu_dbg(vdev, FW_BOOT, "FW version: %s\n", (char *)fw_hdr + VPU_FW_HEADER_SIZE);
if (IVPU_FW_CHECK_API(vdev, fw_hdr, BOOT, 3))
return -EINVAL;
if (IVPU_FW_CHECK_API(vdev, fw_hdr, JSM, 3))
return -EINVAL;
fw->runtime_addr = runtime_addr;
fw->runtime_size = runtime_size;
@@ -141,16 +158,10 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
fw->cold_boot_entry_point = fw_hdr->entry_point;
fw->entry_point = fw->cold_boot_entry_point;
ivpu_dbg(vdev, FW_BOOT, "Header version: 0x%x, format 0x%x\n",
fw_hdr->header_version, fw_hdr->image_format);
ivpu_dbg(vdev, FW_BOOT, "Size: file %lu image %u runtime %u shavenn %u\n",
fw->file->size, fw->image_size, fw->runtime_size, fw->shave_nn_size);
ivpu_dbg(vdev, FW_BOOT, "Address: runtime 0x%llx, load 0x%llx, entry point 0x%llx\n",
fw->runtime_addr, image_load_addr, fw->entry_point);
ivpu_dbg(vdev, FW_BOOT, "FW version: %s\n", (char *)fw_hdr + VPU_FW_HEADER_SIZE);
IVPU_FW_CHECK_API(vdev, fw_hdr, BOOT);
IVPU_FW_CHECK_API(vdev, fw_hdr, JSM);
return 0;
}
......
@@ -42,9 +42,7 @@ static int prime_map_pages_locked(struct ivpu_bo *bo)
struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
struct sg_table *sgt;
WARN_ON(!bo->base.import_attach);
sgt = dma_buf_map_attachment(bo->base.import_attach, DMA_BIDIRECTIONAL);
sgt = dma_buf_map_attachment_unlocked(bo->base.import_attach, DMA_BIDIRECTIONAL);
if (IS_ERR(sgt)) {
ivpu_err(vdev, "Failed to map attachment: %ld\n", PTR_ERR(sgt));
return PTR_ERR(sgt);
@@ -56,9 +54,7 @@ static int prime_map_pages_locked(struct ivpu_bo *bo)
static void prime_unmap_pages_locked(struct ivpu_bo *bo)
{
WARN_ON(!bo->base.import_attach);
dma_buf_unmap_attachment(bo->base.import_attach, bo->sgt, DMA_BIDIRECTIONAL);
dma_buf_unmap_attachment_unlocked(bo->base.import_attach, bo->sgt, DMA_BIDIRECTIONAL);
bo->sgt = NULL;
}
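/*
 * Note: the _unlocked dma-buf attachment helpers take the buffer's
 * reservation lock internally, so these functions must be called
 * without it held.
 */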
......
@@ -400,8 +400,9 @@ static int ivpu_direct_job_submission(struct ivpu_job *job)
if (ret)
goto err_xa_erase;
ivpu_dbg(vdev, JOB, "Job submitted: id %3u ctx %2d engine %d next %d\n",
job->job_id, file_priv->ctx.id, job->engine_idx, cmdq->jobq->header.tail);
ivpu_dbg(vdev, JOB, "Job submitted: id %3u addr 0x%llx ctx %2d engine %d next %d\n",
job->job_id, job->cmd_buf_vpu_addr, file_priv->ctx.id,
job->engine_idx, cmdq->jobq->header.tail);
if (ivpu_test_mode == IVPU_TEST_MODE_NULL_HW) {
ivpu_job_done(vdev, job->job_id, VPU_JSM_STATUS_SUCCESS);
......
@@ -167,3 +167,14 @@ int ivpu_jsm_trace_set_config(struct ivpu_device *vdev, u32 trace_level, u32 tra
return ret;
}
int ivpu_jsm_context_release(struct ivpu_device *vdev, u32 host_ssid)
{
struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SSID_RELEASE };
struct vpu_jsm_msg resp;
req.payload.ssid_release.host_ssid = host_ssid;
return ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_SSID_RELEASE_DONE, &resp,
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
}
@@ -19,5 +19,5 @@ int ivpu_jsm_trace_get_capability(struct ivpu_device *vdev, u32 *trace_destinati
u64 *trace_hw_component_mask);
int ivpu_jsm_trace_set_config(struct ivpu_device *vdev, u32 trace_level, u32 trace_destination_mask,
u64 trace_hw_component_mask);
int ivpu_jsm_context_release(struct ivpu_device *vdev, u32 host_ssid);
#endif
@@ -17,12 +17,12 @@
/*
* Major version changes that break backward compatibility
*/
#define VPU_JSM_API_VER_MAJOR 2
#define VPU_JSM_API_VER_MAJOR 3
/*
* Minor version changes when API backward compatibility is preserved.
*/
#define VPU_JSM_API_VER_MINOR 10
#define VPU_JSM_API_VER_MINOR 0
/*
* API header changed (field names, documentation, formatting) but API itself has not been changed
@@ -103,10 +103,10 @@
/*
* Max length (including trailing NULL char) of a dyndbg command.
*
* NOTE: 112 is used so that the size of 'struct vpu_ipc_msg' in the JSM API is
* NOTE: 96 is used so that the size of 'struct vpu_ipc_msg' in the JSM API is
* 128 bytes (multiple of 64 bytes, the cache line size).
*/
#define VPU_DYNDBG_CMD_MAX_LEN 112
#define VPU_DYNDBG_CMD_MAX_LEN 96
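/*
 * Illustrative size check: the vpu_jsm_msg header fields now total
 * 32 bytes (8 + 4 + 4 + 4 + 4 + 8 for reserved_0, type, status,
 * request_id, result and reserved_1), so a 96-byte dyndbg command keeps
 * the overall message at 32 + 96 = 128 bytes.
 */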
/*
* Job format.
@@ -119,7 +119,7 @@ struct vpu_job_queue_entry {
u64 root_page_table_update_counter; /**< Page tables update events counter */
u64 preemption_buffer_address; /**< Address of the preemption buffer to use for this job */
u64 preemption_buffer_size; /**< Size of the preemption buffer to use for this job */
u8 reserved[VPU_JOB_RESERVED_BYTES];
u8 reserved_0[VPU_JOB_RESERVED_BYTES];
};
/*
@@ -129,7 +129,7 @@ struct vpu_job_queue_header {
u32 engine_idx;
u32 head;
u32 tail;
u8 reserved[VPU_JOB_QUEUE_RESERVED_BYTES];
u8 reserved_0[VPU_JOB_QUEUE_RESERVED_BYTES];
};
/*
@@ -319,6 +319,8 @@ enum vpu_ipc_msg_status { VPU_JSM_MSG_FREE, VPU_JSM_MSG_ALLOCATED };
struct vpu_ipc_msg_payload_engine_reset {
/* Engine to be reset. */
u32 engine_idx;
/* Reserved */
u32 reserved_0;
};
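/*
 * Illustrative only: the new trailing reserved_0 pads the payload to a
 * multiple of 64 bits, which could be verified with e.g.
 *
 *	static_assert(sizeof(struct vpu_ipc_msg_payload_engine_reset) % 8 == 0,
 *		      "engine_reset payload is misaligned");
 */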
struct vpu_ipc_msg_payload_engine_preempt {
@@ -336,6 +338,8 @@ struct vpu_ipc_msg_payload_engine_preempt {
struct vpu_ipc_msg_payload_register_db {
/* Index of the doorbell to register. */
u32 db_idx;
/* Reserved */
u32 reserved_0;
/* Virtual address in Global GTT pointing to the start of job queue. */
u64 jobq_base;
/* Size of the job queue in bytes. */
@@ -352,11 +356,15 @@ struct vpu_ipc_msg_payload_register_db {
struct vpu_ipc_msg_payload_unregister_db {
/* Index of the doorbell to unregister. */
u32 db_idx;
/* Reserved */
u32 reserved_0;
};
struct vpu_ipc_msg_payload_query_engine_hb {
/* Engine to return heartbeat value. */
u32 engine_idx;
/* Reserved */
u32 reserved_0;
};
struct vpu_ipc_msg_payload_power_level {
@@ -371,11 +379,15 @@ struct vpu_ipc_msg_payload_power_level {
* considered to be valid.
*/
u32 power_level;
/* Reserved */
u32 reserved_0;
};
struct vpu_ipc_msg_payload_ssid_release {
/* Host sub-stream ID for the context to be released. */
u32 host_ssid;
/* Reserved */
u32 reserved_0;
};
/**
@@ -425,9 +437,6 @@ struct vpu_jsm_metric_streamer_start {
u64 next_buffer_size;
};
static_assert(sizeof(struct vpu_jsm_metric_streamer_start) % 8 == 0,
"vpu_jsm_metric_streamer_start is misaligned");
/**
* @brief Metric streamer stop command structure.
* @see VPU_JSM_MSG_METRIC_STREAMER_STOP
@@ -437,9 +446,6 @@ struct vpu_jsm_metric_streamer_stop {
u64 metric_group_mask;
};
static_assert(sizeof(struct vpu_jsm_metric_streamer_stop) % 8 == 0,
"vpu_jsm_metric_streamer_stop is misaligned");
/**
* Provide VPU FW with buffers to write metric data.
* @see VPU_JSM_MSG_METRIC_STREAMER_UPDATE
@@ -471,9 +477,6 @@ struct vpu_jsm_metric_streamer_update {
u64 next_buffer_size;
};
static_assert(sizeof(struct vpu_jsm_metric_streamer_update) % 8 == 0,
"vpu_jsm_metric_streamer_update is misaligned");
struct vpu_ipc_msg_payload_blob_deinit {
/* 64-bit unique ID for the blob to be de-initialized. */
u64 blob_id;
@@ -491,7 +494,7 @@ struct vpu_ipc_msg_payload_job_done {
/* Host SSID */
u32 host_ssid;
/* Zero Padding */
u32 reserved;
u32 reserved_0;
/* Command queue id */
u64 cmdq_id;
};
@@ -500,7 +503,7 @@ struct vpu_jsm_engine_reset_context {
/* Host SSID */
u32 host_ssid;
/* Zero Padding */
u32 reserved;
u32 reserved_0;
/* Command queue id */
u64 cmdq_id;
/* Flags: 0: cause of hang; 1: collateral damage of reset */
@@ -533,6 +536,8 @@ struct vpu_ipc_msg_payload_engine_preempt_done {
struct vpu_ipc_msg_payload_register_db_done {
/* Index of the registered doorbell. */
u32 db_idx;
/* Reserved */
u32 reserved_0;
};
/**
@@ -543,11 +548,15 @@ struct vpu_ipc_msg_payload_register_db_done {
struct vpu_ipc_msg_payload_unregister_db_done {
/* Index of the unregistered doorbell. */
u32 db_idx;
/* Reserved */
u32 reserved_0;
};
struct vpu_ipc_msg_payload_query_engine_hb_done {
/* Engine returning heartbeat value. */
u32 engine_idx;
/* Reserved */
u32 reserved_0;
/* Heartbeat value. */
u64 heartbeat;
};
@@ -559,6 +568,8 @@ struct vpu_ipc_msg_payload_get_power_level_count_done {
* implementations.
*/
u32 power_level_count;
/* Reserved */
u32 reserved_0;
/**
* Power consumption limit for each supported power level in
* [0-100%] range relative to power level 0.
@@ -577,7 +588,7 @@ struct vpu_ipc_msg_payload_hws_priority_band_setup {
* Grace period in 100ns units when preempting another priority band for
* this priority band
*/
u64 grace_period[VPU_HWS_NUM_PRIORITY_BANDS];
u32 grace_period[VPU_HWS_NUM_PRIORITY_BANDS];
/*
* Default quantum in 100ns units for scheduling across processes
* within a priority band
@@ -593,6 +604,8 @@ struct vpu_ipc_msg_payload_hws_priority_band_setup {
* in situations when it's starved by the focus band.
*/
u32 normal_band_percentage;
/* Reserved */
u32 reserved_0;
};
/* HWS create command queue request */
@@ -609,6 +622,8 @@ struct vpu_ipc_msg_payload_hws_create_cmdq {
u64 cmdq_base;
/* Command queue size */
u32 cmdq_size;
/* Reserved */
u32 reserved_0;
};
/* HWS create command queue response */
@@ -806,9 +821,6 @@ struct vpu_jsm_metric_streamer_done {
u64 bytes_written;
};
static_assert(sizeof(struct vpu_jsm_metric_streamer_done) % 8 == 0,
"vpu_jsm_metric_streamer_done is misaligned");
/**
* Metric group description placed in the metric buffer after successful completion
* of the VPU_JSM_MSG_METRIC_STREAMER_INFO command. This is followed by one or more
@@ -848,16 +860,13 @@ struct vpu_jsm_metric_group_descriptor {
u32 name_string_size;
/** Counter description string size, @see name_string_size */
u32 description_string_size;
u32 reserved_0[2];
u64 reserved_0;
/**
* Right after this structure, the VPU writes name and description of
* the metric group.
*/
};
static_assert(sizeof(struct vpu_jsm_metric_group_descriptor) % 8 == 0,
"vpu_jsm_metric_group_descriptor is misaligned");
/**
* Metric counter description, placed in the buffer after vpu_jsm_metric_group_descriptor.
* @see VPU_JSM_MSG_METRIC_STREAMER_INFO
@@ -894,16 +903,13 @@ struct vpu_jsm_metric_counter_descriptor {
u32 component_string_size;
/** Counter string size, @see name_string_size */
u32 units_string_size;
u32 reserved_0[2];
u64 reserved_0;
/**
* Right after this structure, the VPU writes name, description
* component and unit strings.
*/
};
static_assert(sizeof(struct vpu_jsm_metric_counter_descriptor) % 8 == 0,
"vpu_jsm_metric_counter_descriptor is misaligned");
/**
* Payload for VPU_JSM_MSG_DYNDBG_CONTROL requests.
*
@@ -977,6 +983,8 @@ union vpu_ipc_msg_payload {
* to allow proper handling of VPU cache operations.
*/
struct vpu_jsm_msg {
/* Reserved */
u64 reserved_0;
/* Message type, see vpu_ipc_msg_type enum. */
u32 type;
/* Buffer status, see vpu_ipc_msg_status enum. */
@@ -988,6 +996,7 @@ struct vpu_jsm_msg {
u32 request_id;
/* Request return code set by the VPU, see VPU_JSM_STATUS_* defines. */
u32 result;
u64 reserved_1;
/* Message payload depending on message type, see vpu_ipc_msg_payload union. */
union vpu_ipc_msg_payload payload;
};
......
@@ -222,6 +222,45 @@ drm_connector_mode_valid(struct drm_connector *connector,
return ret;
}
static void drm_kms_helper_disable_hpd(struct drm_device *dev)
{
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
const struct drm_connector_helper_funcs *funcs =
connector->helper_private;
if (funcs && funcs->disable_hpd)
funcs->disable_hpd(connector);
}
drm_connector_list_iter_end(&conn_iter);
}
static bool drm_kms_helper_enable_hpd(struct drm_device *dev)
{
bool poll = false;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
const struct drm_connector_helper_funcs *funcs =
connector->helper_private;
if (funcs && funcs->enable_hpd)
funcs->enable_hpd(connector);
if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT |
DRM_CONNECTOR_POLL_DISCONNECT))
poll = true;
}
drm_connector_list_iter_end(&conn_iter);
return poll;
}
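/*
 * Sketch (hypothetical driver code, not part of this patch): a driver
 * opts in to these helpers by implementing the HPD hooks in its
 * connector helper funcs, e.g.
 *
 *	static const struct drm_connector_helper_funcs foo_connector_helpers = {
 *		.get_modes   = foo_connector_get_modes,
 *		.enable_hpd  = foo_connector_enable_hpd,
 *		.disable_hpd = foo_connector_disable_hpd,
 *	};
 */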
#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
/**
* drm_kms_helper_poll_enable - re-enable output polling.
@@ -241,26 +280,13 @@ drm_connector_mode_valid(struct drm_connector *connector,
void drm_kms_helper_poll_enable(struct drm_device *dev)
{
bool poll = false;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
unsigned long delay = DRM_OUTPUT_POLL_PERIOD;
if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll ||
dev->mode_config.poll_running)
return;
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
const struct drm_connector_helper_funcs *funcs =
connector->helper_private;
if (funcs && funcs->enable_hpd)
funcs->enable_hpd(connector);
if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT |
DRM_CONNECTOR_POLL_DISCONNECT))
poll = true;
}
drm_connector_list_iter_end(&conn_iter);
poll = drm_kms_helper_enable_hpd(dev);
if (dev->mode_config.delayed_event) {
/*
@@ -279,6 +305,8 @@ void drm_kms_helper_poll_enable(struct drm_device *dev)
if (poll)
schedule_delayed_work(&dev->mode_config.output_poll_work, delay);
dev->mode_config.poll_running = true;
}
EXPORT_SYMBOL(drm_kms_helper_poll_enable);
@@ -567,10 +595,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
}
/* Re-enable polling in case the global poll config changed. */
if (drm_kms_helper_poll != dev->mode_config.poll_running)
drm_kms_helper_poll_enable(dev);
dev->mode_config.poll_running = drm_kms_helper_poll;
drm_kms_helper_poll_enable(dev);
if (connector->status == connector_status_disconnected) {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
@@ -710,8 +735,11 @@ static void output_poll_execute(struct work_struct *work)
changed = dev->mode_config.delayed_event;
dev->mode_config.delayed_event = false;
if (!drm_kms_helper_poll)
if (!drm_kms_helper_poll && dev->mode_config.poll_running) {
drm_kms_helper_disable_hpd(dev);
dev->mode_config.poll_running = false;
goto out;
}
if (!mutex_trylock(&dev->mode_config.mutex)) {
repoll = true;
@@ -808,30 +836,6 @@ bool drm_kms_helper_is_poll_worker(void)
}
EXPORT_SYMBOL(drm_kms_helper_is_poll_worker);
static void drm_kms_helper_poll_disable_fini(struct drm_device *dev, bool fini)
{
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
if (!dev->mode_config.poll_enabled)
return;
if (fini)
dev->mode_config.poll_enabled = false;
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
const struct drm_connector_helper_funcs *funcs =
connector->helper_private;
if (funcs && funcs->disable_hpd)
funcs->disable_hpd(connector);
}
drm_connector_list_iter_end(&conn_iter);
cancel_delayed_work_sync(&dev->mode_config.output_poll_work);
}
/**
* drm_kms_helper_poll_disable - disable output polling
* @dev: drm_device
@@ -848,7 +852,12 @@ static void drm_kms_helper_poll_disable_fini(struct drm_device *dev, bool fini)
*/
void drm_kms_helper_poll_disable(struct drm_device *dev)
{
drm_kms_helper_poll_disable_fini(dev, false);
if (dev->mode_config.poll_running)
drm_kms_helper_disable_hpd(dev);
cancel_delayed_work_sync(&dev->mode_config.output_poll_work);
dev->mode_config.poll_running = false;
}
EXPORT_SYMBOL(drm_kms_helper_poll_disable);
@@ -886,7 +895,12 @@ EXPORT_SYMBOL(drm_kms_helper_poll_init);
*/
void drm_kms_helper_poll_fini(struct drm_device *dev)
{
drm_kms_helper_poll_disable_fini(dev, true);
if (!dev->mode_config.poll_enabled)
return;
drm_kms_helper_poll_disable(dev);
dev->mode_config.poll_enabled = false;
}
EXPORT_SYMBOL(drm_kms_helper_poll_fini);
......
@@ -36,7 +36,7 @@ config DRM_VC4_HDMI_CEC
and want to use CEC.
config DRM_VC4_KUNIT_TEST
bool "KUnit tests for VC4" if !KUNIT_ALL_TESTS
tristate "KUnit tests for VC4" if !KUNIT_ALL_TESTS
depends on DRM_VC4 && KUNIT
select DRM_KUNIT_TEST_HELPERS
default KUNIT_ALL_TESTS
......
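With the symbol now tristate, the VC4 KUnit tests can also be built as
modules. A minimal .kunitconfig for running them built-in might look like
this (illustrative, using only the symbols shown above):

    CONFIG_KUNIT=y
    CONFIG_DRM_VC4=y
    CONFIG_DRM_VC4_KUNIT_TEST=y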