Commit a80314c3 authored by Linus Torvalds

Merge tag 'drm-fixes-2021-04-02' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Things have settled down in time for Easter, a random smattering of
  small fixes across a few drivers.

  I'm guessing though there might be some i915 and misc fixes out there
  I haven't gotten yet, but since today is a public holiday here, I'm
  sending this early so I can have the day off, I'll see if more
  requests come in and decide what to do with them later.

  amdgpu:
   - Polaris idle power fix
   - VM fix
   - Vangogh S3 fix
   - Fixes for non-4K page sizes

  amdkfd:
   - dqm fence memory corruption fix

  tegra:
   - lockdep warning fix
   - runtime PM reference fix
   - display controller fix
   - PLL Fix

  imx:
   - memory leak in error path fix
   - LDB driver channel registration fix
   - oob array warning in LDB driver

  exynos:
   - unused header file removal"

* tag 'drm-fixes-2021-04-02' of git://anongit.freedesktop.org/drm/drm:
  drm/amdgpu: check alignment on CPU page for bo map
  drm/amdgpu: Set a suitable dev_info.gart_page_size
  drm/amdgpu/vangogh: don't check for dpm in is_dpm_running when in suspend
  drm/amdkfd: dqm fence memory corruption
  drm/tegra: sor: Grab runtime PM reference across reset
  drm/tegra: dc: Restore coupling of display controllers
  gpu: host1x: Use different lock classes for each client
  drm/tegra: dc: Don't set PLL clock to 0Hz
  drm/amdgpu: fix offset calculation in amdgpu_vm_bo_clear_mappings()
  drm/amd/pm: no need to force MCLK to highest when no display connected
  drm/exynos/decon5433: Remove the unused include statements
  drm/imx: imx-ldb: fix out of bounds array access warning
  drm/imx: imx-ldb: Register LDB channel1 when it is the only channel to be used
  drm/imx: fix memory leak when fails to init
parents ffd9fb54 6fdb8e5a
...@@ -778,9 +778,9 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) ...@@ -778,9 +778,9 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
dev_info->high_va_offset = AMDGPU_GMC_HOLE_END; dev_info->high_va_offset = AMDGPU_GMC_HOLE_END;
dev_info->high_va_max = AMDGPU_GMC_HOLE_END | vm_size; dev_info->high_va_max = AMDGPU_GMC_HOLE_END | vm_size;
} }
dev_info->virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE); dev_info->virtual_address_alignment = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
dev_info->pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE; dev_info->pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE;
dev_info->gart_page_size = AMDGPU_GPU_PAGE_SIZE; dev_info->gart_page_size = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
dev_info->cu_active_number = adev->gfx.cu_info.number; dev_info->cu_active_number = adev->gfx.cu_info.number;
dev_info->cu_ao_mask = adev->gfx.cu_info.ao_cu_mask; dev_info->cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
dev_info->ce_ram_size = adev->gfx.ce_ram_size; dev_info->ce_ram_size = adev->gfx.ce_ram_size;
......
...@@ -2197,8 +2197,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, ...@@ -2197,8 +2197,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
uint64_t eaddr; uint64_t eaddr;
/* validate the parameters */ /* validate the parameters */
if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK || if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK ||
size == 0 || size & AMDGPU_GPU_PAGE_MASK) size == 0 || size & ~PAGE_MASK)
return -EINVAL; return -EINVAL;
/* make sure object fit at this offset */ /* make sure object fit at this offset */
...@@ -2263,8 +2263,8 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev, ...@@ -2263,8 +2263,8 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
int r; int r;
/* validate the parameters */ /* validate the parameters */
if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK || if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK ||
size == 0 || size & AMDGPU_GPU_PAGE_MASK) size == 0 || size & ~PAGE_MASK)
return -EINVAL; return -EINVAL;
/* make sure object fit at this offset */ /* make sure object fit at this offset */
...@@ -2409,7 +2409,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev, ...@@ -2409,7 +2409,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
after->start = eaddr + 1; after->start = eaddr + 1;
after->last = tmp->last; after->last = tmp->last;
after->offset = tmp->offset; after->offset = tmp->offset;
after->offset += after->start - tmp->start; after->offset += (after->start - tmp->start) << PAGE_SHIFT;
after->flags = tmp->flags; after->flags = tmp->flags;
after->bo_va = tmp->bo_va; after->bo_va = tmp->bo_va;
list_add(&after->list, &tmp->bo_va->invalids); list_add(&after->list, &tmp->bo_va->invalids);
......
...@@ -155,7 +155,7 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev, ...@@ -155,7 +155,7 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
/* Wait till CP writes sync code: */ /* Wait till CP writes sync code: */
status = amdkfd_fence_wait_timeout( status = amdkfd_fence_wait_timeout(
(unsigned int *) rm_state, rm_state,
QUEUESTATE__ACTIVE, 1500); QUEUESTATE__ACTIVE, 1500);
kfd_gtt_sa_free(dbgdev->dev, mem_obj); kfd_gtt_sa_free(dbgdev->dev, mem_obj);
......
...@@ -1167,7 +1167,7 @@ static int start_cpsch(struct device_queue_manager *dqm) ...@@ -1167,7 +1167,7 @@ static int start_cpsch(struct device_queue_manager *dqm)
if (retval) if (retval)
goto fail_allocate_vidmem; goto fail_allocate_vidmem;
dqm->fence_addr = dqm->fence_mem->cpu_ptr; dqm->fence_addr = (uint64_t *)dqm->fence_mem->cpu_ptr;
dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr; dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;
init_interrupts(dqm); init_interrupts(dqm);
...@@ -1340,8 +1340,8 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, ...@@ -1340,8 +1340,8 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
return retval; return retval;
} }
int amdkfd_fence_wait_timeout(unsigned int *fence_addr, int amdkfd_fence_wait_timeout(uint64_t *fence_addr,
unsigned int fence_value, uint64_t fence_value,
unsigned int timeout_ms) unsigned int timeout_ms)
{ {
unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies; unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;
......
...@@ -192,7 +192,7 @@ struct device_queue_manager { ...@@ -192,7 +192,7 @@ struct device_queue_manager {
uint16_t vmid_pasid[VMID_NUM]; uint16_t vmid_pasid[VMID_NUM];
uint64_t pipelines_addr; uint64_t pipelines_addr;
uint64_t fence_gpu_addr; uint64_t fence_gpu_addr;
unsigned int *fence_addr; uint64_t *fence_addr;
struct kfd_mem_obj *fence_mem; struct kfd_mem_obj *fence_mem;
bool active_runlist; bool active_runlist;
int sched_policy; int sched_policy;
......
...@@ -347,7 +347,7 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues) ...@@ -347,7 +347,7 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
} }
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address, int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
uint32_t fence_value) uint64_t fence_value)
{ {
uint32_t *buffer, size; uint32_t *buffer, size;
int retval = 0; int retval = 0;
......
...@@ -283,7 +283,7 @@ static int pm_unmap_queues_v9(struct packet_manager *pm, uint32_t *buffer, ...@@ -283,7 +283,7 @@ static int pm_unmap_queues_v9(struct packet_manager *pm, uint32_t *buffer,
} }
static int pm_query_status_v9(struct packet_manager *pm, uint32_t *buffer, static int pm_query_status_v9(struct packet_manager *pm, uint32_t *buffer,
uint64_t fence_address, uint32_t fence_value) uint64_t fence_address, uint64_t fence_value)
{ {
struct pm4_mes_query_status *packet; struct pm4_mes_query_status *packet;
......
...@@ -263,7 +263,7 @@ static int pm_unmap_queues_vi(struct packet_manager *pm, uint32_t *buffer, ...@@ -263,7 +263,7 @@ static int pm_unmap_queues_vi(struct packet_manager *pm, uint32_t *buffer,
} }
static int pm_query_status_vi(struct packet_manager *pm, uint32_t *buffer, static int pm_query_status_vi(struct packet_manager *pm, uint32_t *buffer,
uint64_t fence_address, uint32_t fence_value) uint64_t fence_address, uint64_t fence_value)
{ {
struct pm4_mes_query_status *packet; struct pm4_mes_query_status *packet;
......
...@@ -1003,8 +1003,8 @@ int pqm_get_wave_state(struct process_queue_manager *pqm, ...@@ -1003,8 +1003,8 @@ int pqm_get_wave_state(struct process_queue_manager *pqm,
u32 *ctl_stack_used_size, u32 *ctl_stack_used_size,
u32 *save_area_used_size); u32 *save_area_used_size);
int amdkfd_fence_wait_timeout(unsigned int *fence_addr, int amdkfd_fence_wait_timeout(uint64_t *fence_addr,
unsigned int fence_value, uint64_t fence_value,
unsigned int timeout_ms); unsigned int timeout_ms);
/* Packet Manager */ /* Packet Manager */
...@@ -1040,7 +1040,7 @@ struct packet_manager_funcs { ...@@ -1040,7 +1040,7 @@ struct packet_manager_funcs {
uint32_t filter_param, bool reset, uint32_t filter_param, bool reset,
unsigned int sdma_engine); unsigned int sdma_engine);
int (*query_status)(struct packet_manager *pm, uint32_t *buffer, int (*query_status)(struct packet_manager *pm, uint32_t *buffer,
uint64_t fence_address, uint32_t fence_value); uint64_t fence_address, uint64_t fence_value);
int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer); int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer);
/* Packet sizes */ /* Packet sizes */
...@@ -1062,7 +1062,7 @@ int pm_send_set_resources(struct packet_manager *pm, ...@@ -1062,7 +1062,7 @@ int pm_send_set_resources(struct packet_manager *pm,
struct scheduling_resources *res); struct scheduling_resources *res);
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues); int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues);
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address, int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
uint32_t fence_value); uint64_t fence_value);
int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type, int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
enum kfd_unmap_queues_filter mode, enum kfd_unmap_queues_filter mode,
......
...@@ -3330,7 +3330,8 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, ...@@ -3330,7 +3330,8 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
disable_mclk_switching_for_display = ((1 < hwmgr->display_config->num_display) && disable_mclk_switching_for_display = ((1 < hwmgr->display_config->num_display) &&
!hwmgr->display_config->multi_monitor_in_sync) || !hwmgr->display_config->multi_monitor_in_sync) ||
smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time); (hwmgr->display_config->num_display &&
smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time));
disable_mclk_switching = disable_mclk_switching_for_frame_lock || disable_mclk_switching = disable_mclk_switching_for_frame_lock ||
disable_mclk_switching_for_display; disable_mclk_switching_for_display;
......
...@@ -384,10 +384,15 @@ static int vangogh_dpm_set_jpeg_enable(struct smu_context *smu, bool enable) ...@@ -384,10 +384,15 @@ static int vangogh_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
static bool vangogh_is_dpm_running(struct smu_context *smu) static bool vangogh_is_dpm_running(struct smu_context *smu)
{ {
struct amdgpu_device *adev = smu->adev;
int ret = 0; int ret = 0;
uint32_t feature_mask[2]; uint32_t feature_mask[2];
uint64_t feature_enabled; uint64_t feature_enabled;
/* we need to re-init after suspend so return false */
if (adev->in_suspend)
return false;
ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2); ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2);
if (ret) if (ret)
......
...@@ -13,7 +13,6 @@ ...@@ -13,7 +13,6 @@
#include <linux/irq.h> #include <linux/irq.h>
#include <linux/mfd/syscon.h> #include <linux/mfd/syscon.h>
#include <linux/of_device.h> #include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/pm_runtime.h> #include <linux/pm_runtime.h>
#include <linux/regmap.h> #include <linux/regmap.h>
......
...@@ -215,7 +215,7 @@ static int imx_drm_bind(struct device *dev) ...@@ -215,7 +215,7 @@ static int imx_drm_bind(struct device *dev)
ret = drmm_mode_config_init(drm); ret = drmm_mode_config_init(drm);
if (ret) if (ret)
return ret; goto err_kms;
ret = drm_vblank_init(drm, MAX_CRTC); ret = drm_vblank_init(drm, MAX_CRTC);
if (ret) if (ret)
......
...@@ -197,6 +197,11 @@ static void imx_ldb_encoder_enable(struct drm_encoder *encoder) ...@@ -197,6 +197,11 @@ static void imx_ldb_encoder_enable(struct drm_encoder *encoder)
int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN; int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder); int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder);
if (mux < 0 || mux >= ARRAY_SIZE(ldb->clk_sel)) {
dev_warn(ldb->dev, "%s: invalid mux %d\n", __func__, mux);
return;
}
drm_panel_prepare(imx_ldb_ch->panel); drm_panel_prepare(imx_ldb_ch->panel);
if (dual) { if (dual) {
...@@ -255,6 +260,11 @@ imx_ldb_encoder_atomic_mode_set(struct drm_encoder *encoder, ...@@ -255,6 +260,11 @@ imx_ldb_encoder_atomic_mode_set(struct drm_encoder *encoder,
int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder); int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder);
u32 bus_format = imx_ldb_ch->bus_format; u32 bus_format = imx_ldb_ch->bus_format;
if (mux < 0 || mux >= ARRAY_SIZE(ldb->clk_sel)) {
dev_warn(ldb->dev, "%s: invalid mux %d\n", __func__, mux);
return;
}
if (mode->clock > 170000) { if (mode->clock > 170000) {
dev_warn(ldb->dev, dev_warn(ldb->dev,
"%s: mode exceeds 170 MHz pixel clock\n", __func__); "%s: mode exceeds 170 MHz pixel clock\n", __func__);
...@@ -583,7 +593,7 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) ...@@ -583,7 +593,7 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
struct imx_ldb_channel *channel = &imx_ldb->channel[i]; struct imx_ldb_channel *channel = &imx_ldb->channel[i];
if (!channel->ldb) if (!channel->ldb)
break; continue;
ret = imx_ldb_register(drm, channel); ret = imx_ldb_register(drm, channel);
if (ret) if (ret)
......
...@@ -1688,6 +1688,11 @@ static void tegra_dc_commit_state(struct tegra_dc *dc, ...@@ -1688,6 +1688,11 @@ static void tegra_dc_commit_state(struct tegra_dc *dc,
dev_err(dc->dev, dev_err(dc->dev,
"failed to set clock rate to %lu Hz\n", "failed to set clock rate to %lu Hz\n",
state->pclk); state->pclk);
err = clk_set_rate(dc->clk, state->pclk);
if (err < 0)
dev_err(dc->dev, "failed to set clock %pC to %lu Hz: %d\n",
dc->clk, state->pclk, err);
} }
DRM_DEBUG_KMS("rate: %lu, div: %u\n", clk_get_rate(dc->clk), DRM_DEBUG_KMS("rate: %lu, div: %u\n", clk_get_rate(dc->clk),
...@@ -1698,11 +1703,6 @@ static void tegra_dc_commit_state(struct tegra_dc *dc, ...@@ -1698,11 +1703,6 @@ static void tegra_dc_commit_state(struct tegra_dc *dc,
value = SHIFT_CLK_DIVIDER(state->div) | PIXEL_CLK_DIVIDER_PCD1; value = SHIFT_CLK_DIVIDER(state->div) | PIXEL_CLK_DIVIDER_PCD1;
tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL); tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL);
} }
err = clk_set_rate(dc->clk, state->pclk);
if (err < 0)
dev_err(dc->dev, "failed to set clock %pC to %lu Hz: %d\n",
dc->clk, state->pclk, err);
} }
static void tegra_dc_stop(struct tegra_dc *dc) static void tegra_dc_stop(struct tegra_dc *dc)
...@@ -2501,22 +2501,18 @@ static int tegra_dc_couple(struct tegra_dc *dc) ...@@ -2501,22 +2501,18 @@ static int tegra_dc_couple(struct tegra_dc *dc)
* POWER_CONTROL registers during CRTC enabling. * POWER_CONTROL registers during CRTC enabling.
*/ */
if (dc->soc->coupled_pm && dc->pipe == 1) { if (dc->soc->coupled_pm && dc->pipe == 1) {
u32 flags = DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER; struct device *companion;
struct device_link *link; struct tegra_dc *parent;
struct device *partner;
partner = driver_find_device(dc->dev->driver, NULL, NULL, companion = driver_find_device(dc->dev->driver, NULL, (const void *)0,
tegra_dc_match_by_pipe); tegra_dc_match_by_pipe);
if (!partner) if (!companion)
return -EPROBE_DEFER; return -EPROBE_DEFER;
link = device_link_add(dc->dev, partner, flags); parent = dev_get_drvdata(companion);
if (!link) { dc->client.parent = &parent->client;
dev_err(dc->dev, "failed to link controllers\n");
return -EINVAL;
}
dev_dbg(dc->dev, "coupled to %s\n", dev_name(partner)); dev_dbg(dc->dev, "coupled to %s\n", dev_name(companion));
} }
return 0; return 0;
......
...@@ -3115,6 +3115,12 @@ static int tegra_sor_init(struct host1x_client *client) ...@@ -3115,6 +3115,12 @@ static int tegra_sor_init(struct host1x_client *client)
* kernel is possible. * kernel is possible.
*/ */
if (sor->rst) { if (sor->rst) {
err = pm_runtime_resume_and_get(sor->dev);
if (err < 0) {
dev_err(sor->dev, "failed to get runtime PM: %d\n", err);
return err;
}
err = reset_control_acquire(sor->rst); err = reset_control_acquire(sor->rst);
if (err < 0) { if (err < 0) {
dev_err(sor->dev, "failed to acquire SOR reset: %d\n", dev_err(sor->dev, "failed to acquire SOR reset: %d\n",
...@@ -3148,6 +3154,7 @@ static int tegra_sor_init(struct host1x_client *client) ...@@ -3148,6 +3154,7 @@ static int tegra_sor_init(struct host1x_client *client)
} }
reset_control_release(sor->rst); reset_control_release(sor->rst);
pm_runtime_put(sor->dev);
} }
err = clk_prepare_enable(sor->clk_safe); err = clk_prepare_enable(sor->clk_safe);
......
...@@ -705,8 +705,9 @@ void host1x_driver_unregister(struct host1x_driver *driver) ...@@ -705,8 +705,9 @@ void host1x_driver_unregister(struct host1x_driver *driver)
EXPORT_SYMBOL(host1x_driver_unregister); EXPORT_SYMBOL(host1x_driver_unregister);
/** /**
* host1x_client_register() - register a host1x client * __host1x_client_register() - register a host1x client
* @client: host1x client * @client: host1x client
* @key: lock class key for the client-specific mutex
* *
* Registers a host1x client with each host1x controller instance. Note that * Registers a host1x client with each host1x controller instance. Note that
* each client will only match their parent host1x controller and will only be * each client will only match their parent host1x controller and will only be
...@@ -715,13 +716,14 @@ EXPORT_SYMBOL(host1x_driver_unregister); ...@@ -715,13 +716,14 @@ EXPORT_SYMBOL(host1x_driver_unregister);
* device and call host1x_device_init(), which will in turn call each client's * device and call host1x_device_init(), which will in turn call each client's
* &host1x_client_ops.init implementation. * &host1x_client_ops.init implementation.
*/ */
int host1x_client_register(struct host1x_client *client) int __host1x_client_register(struct host1x_client *client,
struct lock_class_key *key)
{ {
struct host1x *host1x; struct host1x *host1x;
int err; int err;
INIT_LIST_HEAD(&client->list); INIT_LIST_HEAD(&client->list);
mutex_init(&client->lock); __mutex_init(&client->lock, "host1x client lock", key);
client->usecount = 0; client->usecount = 0;
mutex_lock(&devices_lock); mutex_lock(&devices_lock);
...@@ -742,7 +744,7 @@ int host1x_client_register(struct host1x_client *client) ...@@ -742,7 +744,7 @@ int host1x_client_register(struct host1x_client *client)
return 0; return 0;
} }
EXPORT_SYMBOL(host1x_client_register); EXPORT_SYMBOL(__host1x_client_register);
/** /**
* host1x_client_unregister() - unregister a host1x client * host1x_client_unregister() - unregister a host1x client
......
...@@ -320,7 +320,14 @@ static inline struct host1x_device *to_host1x_device(struct device *dev) ...@@ -320,7 +320,14 @@ static inline struct host1x_device *to_host1x_device(struct device *dev)
int host1x_device_init(struct host1x_device *device); int host1x_device_init(struct host1x_device *device);
int host1x_device_exit(struct host1x_device *device); int host1x_device_exit(struct host1x_device *device);
int host1x_client_register(struct host1x_client *client); int __host1x_client_register(struct host1x_client *client,
struct lock_class_key *key);
#define host1x_client_register(class) \
({ \
static struct lock_class_key __key; \
__host1x_client_register(class, &__key); \
})
int host1x_client_unregister(struct host1x_client *client); int host1x_client_unregister(struct host1x_client *client);
int host1x_client_suspend(struct host1x_client *client); int host1x_client_suspend(struct host1x_client *client);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment