Commit 148fb2e2 authored by Dave Airlie

Merge branch 'drm-next-5.1' of git://people.freedesktop.org/~agd5f/linux into drm-next

ttm:
- Replace ref/unref naming with get/put

amdgpu:
- Revert DC clang fix (-msse2); it causes a segfault with some compiler versions
- SR-IOV fix
- PCIE fix for vega20
- Misc DC fixes
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190201062345.7304-1-alexander.deucher@amd.com
parents 37fdaa33 47dd8048
@@ -91,10 +91,6 @@ int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_dev
 			"XGMI: Set topology failure on device %llx, hive %llx, ret %d",
 			adev->gmc.xgmi.node_id,
 			adev->gmc.xgmi.hive_id, ret);
-	else
-		dev_info(adev->dev, "XGMI: Set topology for node %d, hive 0x%llx.\n",
-			 adev->gmc.xgmi.physical_node_id,
-			 adev->gmc.xgmi.hive_id);
 	return ret;
 }
@@ -160,6 +156,9 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
 			break;
 	}
 
+	dev_info(adev->dev, "XGMI: Add node %d, hive 0x%llx.\n",
+		 adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id);
+
 	mutex_unlock(&hive->hive_lock);
 exit:
 	return ret;
......
@@ -965,6 +965,10 @@ static int gmc_v9_0_sw_init(void *handle)
 		 * vm size is 256TB (48bit), maximum size of Vega10,
 		 * block size 512 (9bit)
 		 */
-		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
+		/* sriov restrict max_pfn below AMDGPU_GMC_HOLE */
+		if (amdgpu_sriov_vf(adev))
+			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
+		else
+			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
 		break;
 	default:
......
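Note on the SR-IOV hunk above: 47 bits is not arbitrary, it is the boundary of the GMC address hole. A sketch of the arithmetic, for illustration only (constant value as defined in amdgpu_gmc.h):

	/* Illustration, not part of the commit: on Vega the canonical-address
	 * hole begins at AMDGPU_GMC_HOLE_START = 0x0000800000000000ULL, i.e.
	 * 1ULL << 47. Capping the VM at 47 bits therefore keeps every mappable
	 * address, and hence max_pfn, below the hole:
	 *
	 *   highest 47-bit address = (1ULL << 47) - 1
	 *   max_pfn <= ((1ULL << 47) - 1) >> AMDGPU_GPU_PAGE_SHIFT
	 */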
@@ -4658,8 +4658,10 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 	flip = kzalloc(sizeof(*flip), GFP_KERNEL);
 	full = kzalloc(sizeof(*full), GFP_KERNEL);
 
-	if (!flip || !full)
+	if (!flip || !full) {
 		dm_error("Failed to allocate update bundles\n");
+		goto cleanup;
+	}
 
 	/* update planes when needed */
 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
@@ -4883,6 +4885,10 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 					     dc_state);
 		mutex_unlock(&dm->dc_lock);
 	}
+
+cleanup:
+	kfree(flip);
+	kfree(full);
 }
 
 /*
@@ -4917,11 +4923,26 @@ static int amdgpu_dm_atomic_commit(struct drm_device *dev,
 	 */
 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
 		struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+		struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
 
-		if (drm_atomic_crtc_needs_modeset(new_crtc_state) && dm_old_crtc_state->stream)
+		if (drm_atomic_crtc_needs_modeset(new_crtc_state)
+		    && dm_old_crtc_state->stream) {
+			/*
+			 * If the stream is removed and CRC capture was
+			 * enabled on the CRTC the extra vblank reference
+			 * needs to be dropped since CRC capture will be
+			 * disabled.
+			 */
+			if (!dm_new_crtc_state->stream
+			    && dm_new_crtc_state->crc_enabled) {
+				drm_crtc_vblank_put(crtc);
+				dm_new_crtc_state->crc_enabled = false;
+			}
+
 			manage_dm_interrupts(adev, acrtc, false);
+		}
 	}
 
 	/*
 	 * Add check here for SoC's that support hardware cursor plane, to
 	 * unset legacy_cursor_update
@@ -5152,6 +5173,10 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 			continue;
 
 		manage_dm_interrupts(adev, acrtc, true);
+
+		/* The stream has changed so CRC capture needs to re-enabled. */
+		if (dm_new_crtc_state->crc_enabled)
+			amdgpu_dm_crtc_set_crc_source(crtc, "auto");
 	}
 
 	/* update planes when needed per crtc*/
......
@@ -64,8 +64,10 @@ amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc, const char *src_name,
 
 int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
 {
+	struct amdgpu_device *adev = crtc->dev->dev_private;
 	struct dm_crtc_state *crtc_state = to_dm_crtc_state(crtc->state);
 	struct dc_stream_state *stream_state = crtc_state->stream;
+	bool enable;
 	enum amdgpu_dm_pipe_crc_source source = dm_parse_crc_source(src_name);
@@ -80,29 +82,33 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
 		return -EINVAL;
 	}
 
-	/* When enabling CRC, we should also disable dithering. */
-	if (source == AMDGPU_DM_PIPE_CRC_SOURCE_AUTO) {
-		if (dc_stream_configure_crc(stream_state->ctx->dc,
-					    stream_state,
-					    true, true)) {
-			crtc_state->crc_enabled = true;
-			dc_stream_set_dither_option(stream_state,
-						    DITHER_OPTION_TRUN8);
-		}
-		else
-			return -EINVAL;
-	} else {
-		if (dc_stream_configure_crc(stream_state->ctx->dc,
-					    stream_state,
-					    false, false)) {
-			crtc_state->crc_enabled = false;
-			dc_stream_set_dither_option(stream_state,
-						    DITHER_OPTION_DEFAULT);
-		}
-		else
-			return -EINVAL;
+	enable = (source == AMDGPU_DM_PIPE_CRC_SOURCE_AUTO);
+
+	mutex_lock(&adev->dm.dc_lock);
+	if (!dc_stream_configure_crc(stream_state->ctx->dc, stream_state,
+				     enable, enable)) {
+		mutex_unlock(&adev->dm.dc_lock);
+		return -EINVAL;
 	}
 
+	/* When enabling CRC, we should also disable dithering. */
+	dc_stream_set_dither_option(stream_state,
+				    enable ? DITHER_OPTION_TRUN8
+					   : DITHER_OPTION_DEFAULT);
+
+	mutex_unlock(&adev->dm.dc_lock);
+
+	/*
+	 * Reading the CRC requires the vblank interrupt handler to be
+	 * enabled. Keep a reference until CRC capture stops.
+	 */
+	if (!crtc_state->crc_enabled && enable)
+		drm_crtc_vblank_get(crtc);
+	else if (crtc_state->crc_enabled && !enable)
+		drm_crtc_vblank_put(crtc);
+
+	crtc_state->crc_enabled = enable;
+
 	/* Reset crc_skipped on dm state */
 	crtc_state->crc_skip_count = 0;
 	return 0;
......
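Usage note, hedged: amdgpu_dm_crtc_set_crc_source() is reached through DRM's generic CRC debugfs ABI, which is why the vblank reference above matters while userspace polls CRCs. Roughly:

	/* Standard DRM CRC debugfs flow (not added by this commit):
	 *
	 *   echo auto > /sys/kernel/debug/dri/0/crtc-0/crc/control
	 *   cat /sys/kernel/debug/dri/0/crtc-0/crc/data   # one CRC per vblank
	 *
	 * Opening the data file ends up in set_crc_source(crtc, "auto");
	 * closing it disables capture, which drops the vblank reference
	 * taken above.
	 */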
@@ -263,6 +263,13 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
 	return true;
 }
 
+/*
+ * poll pending down reply before clear payload allocation table
+ */
+void dm_helpers_dp_mst_poll_pending_down_reply(
+	struct dc_context *ctx,
+	const struct dc_link *link)
+{}
+
 /*
  * Clear payload allocation table before enable MST DP link.
......
@@ -30,7 +30,7 @@ else ifneq ($(call cc-option, -mstack-alignment=16),)
 cc_stack_align := -mstack-alignment=16
 endif
 
-calcs_ccflags := -mhard-float -msse -msse2 $(cc_stack_align)
+calcs_ccflags := -mhard-float -msse $(cc_stack_align)
 
 CFLAGS_dcn_calcs.o := $(calcs_ccflags)
 CFLAGS_dcn_calc_auto.o := $(calcs_ccflags)
......
@@ -1463,11 +1463,13 @@ static void commit_planes_do_stream_update(struct dc *dc,
 				stream_update->adjust->v_total_min,
 				stream_update->adjust->v_total_max);
 
-		if (stream_update->periodic_fn_vsync_delta &&
-				pipe_ctx->stream_res.tg->funcs->program_vline_interrupt)
-			pipe_ctx->stream_res.tg->funcs->program_vline_interrupt(
-				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing,
-				pipe_ctx->stream->periodic_fn_vsync_delta);
+		if (stream_update->vline0_config && pipe_ctx->stream_res.tg->funcs->program_vline_interrupt)
+			pipe_ctx->stream_res.tg->funcs->program_vline_interrupt(
+				pipe_ctx->stream_res.tg, VLINE0, stream->vline0_config);
+
+		if (stream_update->vline1_config && pipe_ctx->stream_res.tg->funcs->program_vline_interrupt)
+			pipe_ctx->stream_res.tg->funcs->program_vline_interrupt(
+				pipe_ctx->stream_res.tg, VLINE1, stream->vline1_config);
 
 		if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
 				stream_update->vrr_infopacket ||
......
@@ -1467,6 +1467,11 @@ static enum dc_status enable_link_dp_mst(
 	if (link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN)
 		return DC_OK;
 
+	/* to make sure the pending down rep can be processed
+	 * before clear payload table
+	 */
+	dm_helpers_dp_mst_poll_pending_down_reply(link->ctx, link);
+
 	/* clear payload table */
 	dm_helpers_dp_mst_clear_payload_allocation_table(link->ctx, link);
......
@@ -45,6 +45,11 @@ struct freesync_context {
 	bool dummy;
 };
 
+struct vline_config {
+	unsigned int start_line;
+	unsigned int end_line;
+};
+
 struct dc_stream_state {
 	// sink is deprecated, new code should not reference
 	// this pointer
@@ -85,8 +90,6 @@ struct dc_stream_state {
 	uint8_t qs_bit;
 	uint8_t qy_bit;
 
-	unsigned long long periodic_fn_vsync_delta;
-
 	/* TODO: custom INFO packets */
 	/* TODO: ABM info (DMCU) */
 	/* PSR info */
@@ -96,6 +99,9 @@ struct dc_stream_state {
 	/* DMCU info */
 	unsigned int abm_level;
 
+	struct vline_config vline0_config;
+	struct vline_config vline1_config;
+
 	/* from core_stream struct */
 	struct dc_context *ctx;
@@ -143,7 +149,9 @@ struct dc_stream_update {
 	struct dc_info_packet *hdr_static_metadata;
 	unsigned int *abm_level;
 
-	unsigned long long *periodic_fn_vsync_delta;
+	struct vline_config *vline0_config;
+	struct vline_config *vline1_config;
+
 	struct dc_crtc_timing_adjust *adjust;
 	struct dc_info_packet *vrr_infopacket;
 	struct dc_info_packet *vsc_infopacket;
......
@@ -92,68 +92,26 @@ static void optc1_disable_stereo(struct timing_generator *optc)
 			OTG_3D_STRUCTURE_STEREO_SEL_OVR, 0);
 }
 
-static uint32_t get_start_vline(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing)
-{
-	struct dc_crtc_timing patched_crtc_timing;
-	int vesa_sync_start;
-	int asic_blank_end;
-	int vertical_line_start;
-
-	patched_crtc_timing = *dc_crtc_timing;
-	optc1_apply_front_porch_workaround(optc, &patched_crtc_timing);
-
-	vesa_sync_start = patched_crtc_timing.v_addressable +
-			patched_crtc_timing.v_border_bottom +
-			patched_crtc_timing.v_front_porch;
-
-	asic_blank_end = (patched_crtc_timing.v_total -
-			vesa_sync_start -
-			patched_crtc_timing.v_border_top);
-
-	vertical_line_start = asic_blank_end - optc->dlg_otg_param.vstartup_start + 1;
-	if (vertical_line_start < 0)
-		vertical_line_start = 0;
-
-	return vertical_line_start;
-}
-
 void optc1_program_vline_interrupt(
 	struct timing_generator *optc,
-	const struct dc_crtc_timing *dc_crtc_timing,
-	unsigned long long vsync_delta)
+	enum vline_select vline,
+	struct vline_config vline_config)
 {
 	struct optc *optc1 = DCN10TG_FROM_TG(optc);
-	unsigned long long req_delta_tens_of_usec = div64_u64((vsync_delta + 9999), 10000);
-	unsigned long long pix_clk_hundreds_khz = div64_u64((dc_crtc_timing->pix_clk_100hz + 999), 1000);
-	uint32_t req_delta_lines = (uint32_t) div64_u64(
-		(req_delta_tens_of_usec * pix_clk_hundreds_khz + dc_crtc_timing->h_total - 1),
-		dc_crtc_timing->h_total);
-	uint32_t vsync_line = get_start_vline(optc, dc_crtc_timing);
-	uint32_t start_line = 0;
-	uint32_t end_line = 0;
-
-	if (req_delta_lines != 0)
-		req_delta_lines--;
-
-	if (req_delta_lines > vsync_line)
-		start_line = dc_crtc_timing->v_total - (req_delta_lines - vsync_line) + 2;
-	else
-		start_line = vsync_line - req_delta_lines;
-
-	end_line = start_line + 2;
-
-	if (start_line >= dc_crtc_timing->v_total)
-		start_line = start_line % dc_crtc_timing->v_total;
-	if (end_line >= dc_crtc_timing->v_total)
-		end_line = end_line % dc_crtc_timing->v_total;
-
-	REG_SET_2(OTG_VERTICAL_INTERRUPT0_POSITION, 0,
-			OTG_VERTICAL_INTERRUPT0_LINE_START, start_line,
-			OTG_VERTICAL_INTERRUPT0_LINE_END, end_line);
+
+	switch (vline) {
+	case VLINE0:
+		REG_SET_2(OTG_VERTICAL_INTERRUPT0_POSITION, 0,
+				OTG_VERTICAL_INTERRUPT0_LINE_START, vline_config.start_line,
+				OTG_VERTICAL_INTERRUPT0_LINE_END, vline_config.end_line);
+		break;
+	case VLINE1:
+		REG_SET(OTG_VERTICAL_INTERRUPT1_POSITION, 0,
+				OTG_VERTICAL_INTERRUPT1_LINE_START, vline_config.start_line);
+		break;
+	default:
+		break;
+	}
 }
 
 /**
......
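For reference, a minimal caller sketch of the reworked interface: scanline positions are now computed by the caller and passed per vline, instead of being derived from a vsync delta inside optc (the line numbers below are illustrative, not from the commit):

	/* Sketch only: request VLINE0 to fire between scanlines 100 and 102. */
	struct vline_config cfg = { .start_line = 100, .end_line = 102 };

	if (tg->funcs->program_vline_interrupt)
		tg->funcs->program_vline_interrupt(tg, VLINE0, cfg);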
@@ -67,6 +67,8 @@
 	SRI(OTG_CLOCK_CONTROL, OTG, inst),\
 	SRI(OTG_VERTICAL_INTERRUPT0_CONTROL, OTG, inst),\
 	SRI(OTG_VERTICAL_INTERRUPT0_POSITION, OTG, inst),\
+	SRI(OTG_VERTICAL_INTERRUPT1_CONTROL, OTG, inst),\
+	SRI(OTG_VERTICAL_INTERRUPT1_POSITION, OTG, inst),\
 	SRI(OTG_VERTICAL_INTERRUPT2_CONTROL, OTG, inst),\
 	SRI(OTG_VERTICAL_INTERRUPT2_POSITION, OTG, inst),\
 	SRI(OPTC_INPUT_CLOCK_CONTROL, ODM, inst),\
@@ -135,6 +137,8 @@ struct dcn_optc_registers {
 	uint32_t OTG_CLOCK_CONTROL;
 	uint32_t OTG_VERTICAL_INTERRUPT0_CONTROL;
 	uint32_t OTG_VERTICAL_INTERRUPT0_POSITION;
+	uint32_t OTG_VERTICAL_INTERRUPT1_CONTROL;
+	uint32_t OTG_VERTICAL_INTERRUPT1_POSITION;
 	uint32_t OTG_VERTICAL_INTERRUPT2_CONTROL;
 	uint32_t OTG_VERTICAL_INTERRUPT2_POSITION;
 	uint32_t OPTC_INPUT_CLOCK_CONTROL;
@@ -227,6 +231,8 @@ struct dcn_optc_registers {
 	SF(OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE, mask_sh),\
 	SF(OTG0_OTG_VERTICAL_INTERRUPT0_POSITION, OTG_VERTICAL_INTERRUPT0_LINE_START, mask_sh),\
 	SF(OTG0_OTG_VERTICAL_INTERRUPT0_POSITION, OTG_VERTICAL_INTERRUPT0_LINE_END, mask_sh),\
+	SF(OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL, OTG_VERTICAL_INTERRUPT1_INT_ENABLE, mask_sh),\
+	SF(OTG0_OTG_VERTICAL_INTERRUPT1_POSITION, OTG_VERTICAL_INTERRUPT1_LINE_START, mask_sh),\
 	SF(OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL, OTG_VERTICAL_INTERRUPT2_INT_ENABLE, mask_sh),\
 	SF(OTG0_OTG_VERTICAL_INTERRUPT2_POSITION, OTG_VERTICAL_INTERRUPT2_LINE_START, mask_sh),\
 	SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_EN, mask_sh),\
@@ -361,6 +367,8 @@ struct dcn_optc_registers {
 	type OTG_VERTICAL_INTERRUPT0_INT_ENABLE;\
 	type OTG_VERTICAL_INTERRUPT0_LINE_START;\
 	type OTG_VERTICAL_INTERRUPT0_LINE_END;\
+	type OTG_VERTICAL_INTERRUPT1_INT_ENABLE;\
+	type OTG_VERTICAL_INTERRUPT1_LINE_START;\
 	type OTG_VERTICAL_INTERRUPT2_INT_ENABLE;\
 	type OTG_VERTICAL_INTERRUPT2_LINE_START;\
 	type OPTC_INPUT_CLK_EN;\
@@ -476,8 +484,8 @@ void optc1_program_timing(
 	bool use_vbios);
 
 void optc1_program_vline_interrupt(struct timing_generator *optc,
-		const struct dc_crtc_timing *dc_crtc_timing,
-		unsigned long long vsync_delta);
+		enum vline_select vline,
+		struct vline_config vline_config);
 
 void optc1_program_global_sync(
 	struct timing_generator *optc);
......
@@ -57,6 +57,13 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
 	struct dp_mst_stream_allocation_table *proposed_table,
 	bool enable);
 
+/*
+ * poll pending down reply before clear payload allocation table
+ */
+void dm_helpers_dp_mst_poll_pending_down_reply(
+	struct dc_context *ctx,
+	const struct dc_link *link);
+
 /*
  * Clear payload allocation table before enable MST DP link.
  */
......
@@ -30,7 +30,7 @@ else ifneq ($(call cc-option, -mstack-alignment=16),)
 cc_stack_align := -mstack-alignment=16
 endif
 
-dml_ccflags := -mhard-float -msse -msse2 $(cc_stack_align)
+dml_ccflags := -mhard-float -msse $(cc_stack_align)
 
 CFLAGS_display_mode_lib.o := $(dml_ccflags)
 CFLAGS_display_pipe_clocks.o := $(dml_ccflags)
......
@@ -134,6 +134,15 @@ struct dc_crtc_timing;
 
 struct drr_params;
 
+struct vline_config;
+
+enum vline_select {
+	VLINE0,
+	VLINE1,
+	VLINE2
+};
+
 struct timing_generator_funcs {
 	bool (*validate_timing)(struct timing_generator *tg,
 			const struct dc_crtc_timing *timing);
@@ -141,8 +150,8 @@ struct timing_generator_funcs {
 			const struct dc_crtc_timing *timing,
 			bool use_vbios);
 	void (*program_vline_interrupt)(struct timing_generator *optc,
-			const struct dc_crtc_timing *dc_crtc_timing,
-			unsigned long long vsync_delta);
+			enum vline_select vline,
+			struct vline_config vline_config);
 	bool (*enable_crtc)(struct timing_generator *tg);
 	bool (*disable_crtc)(struct timing_generator *tg);
 	bool (*is_counter_moving)(struct timing_generator *tg);
......
@@ -144,6 +144,14 @@ enum dc_irq_source {
 	DC_IRQ_SOURCE_DC5_VLINE0,
 	DC_IRQ_SOURCE_DC6_VLINE0,
 
+	DC_IRQ_SOURCE_DC1_VLINE1,
+	DC_IRQ_SOURCE_DC2_VLINE1,
+	DC_IRQ_SOURCE_DC3_VLINE1,
+	DC_IRQ_SOURCE_DC4_VLINE1,
+	DC_IRQ_SOURCE_DC5_VLINE1,
+	DC_IRQ_SOURCE_DC6_VLINE1,
+
 	DAL_IRQ_SOURCES_NUMBER
 };
......
@@ -771,6 +771,47 @@ static int vega20_init_smc_table(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
+static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
+	uint32_t pcie_speed = 0, pcie_width = 0, pcie_arg;
+	int ret;
+
+	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
+		pcie_speed = 16;
+	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
+		pcie_speed = 8;
+	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
+		pcie_speed = 5;
+	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
+		pcie_speed = 2;
+
+	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X32)
+		pcie_width = 32;
+	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
+		pcie_width = 16;
+	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
+		pcie_width = 12;
+	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
+		pcie_width = 8;
+	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
+		pcie_width = 4;
+	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
+		pcie_width = 2;
+	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
+		pcie_width = 1;
+
+	pcie_arg = pcie_width | (pcie_speed << 8);
+
+	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+			PPSMC_MSG_OverridePcieParameters, pcie_arg);
+	PP_ASSERT_WITH_CODE(!ret,
+		"[OverridePcieParameters] Attempt to override pcie params failed!",
+		return ret);
+
+	return 0;
+}
+
 static int vega20_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
 {
 	struct vega20_hwmgr *data =
@@ -1570,6 +1611,11 @@ static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
 			"[EnableDPMTasks] Failed to initialize SMC table!",
 			return result);
 
+	result = vega20_override_pcie_parameters(hwmgr);
+	PP_ASSERT_WITH_CODE(!result,
+			"[EnableDPMTasks] Failed to override pcie parameters!",
+			return result);
+
 	result = vega20_run_btc(hwmgr);
 	PP_ASSERT_WITH_CODE(!result,
 			"[EnableDPMTasks] Failed to run btc!",
......
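A worked example of the pcie_arg encoding above, assuming a board that reports Gen4 speed and x16 width:

	/* pcie_speed = 16 (Gen4, GT/s), pcie_width = 16 (x16)
	 *
	 *   pcie_arg = pcie_width | (pcie_speed << 8)
	 *            = 16 | (16 << 8)
	 *            = 0x1010
	 *
	 * i.e. link width in the low byte, link speed in the next byte, as
	 * consumed by PPSMC_MSG_OverridePcieParameters.
	 */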
@@ -639,13 +639,9 @@ int ast_dumb_create(struct drm_file *file,
 
 static void ast_bo_unref(struct ast_bo **bo)
 {
-	struct ttm_buffer_object *tbo;
-
 	if ((*bo) == NULL)
 		return;
 
-	tbo = &((*bo)->bo);
-	ttm_bo_unref(&tbo);
+	ttm_bo_put(&((*bo)->bo));
 	*bo = NULL;
 }
......
@@ -318,13 +318,9 @@ int mgag200_dumb_create(struct drm_file *file,
 
 static void mgag200_bo_unref(struct mgag200_bo **bo)
 {
-	struct ttm_buffer_object *tbo;
-
 	if ((*bo) == NULL)
 		return;
 
-	tbo = &((*bo)->bo);
-	ttm_bo_unref(&tbo);
+	ttm_bo_put(&((*bo)->bo));
 	*bo = NULL;
 }
......
@@ -61,12 +61,14 @@ nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
 		return -EINVAL;
 
 	prev = *pnvbo;
-	*pnvbo = ref ? nouveau_bo(ttm_bo_reference(&ref->bo)) : NULL;
-	if (prev) {
-		struct ttm_buffer_object *bo = &prev->bo;
-
-		ttm_bo_unref(&bo);
+	if (ref) {
+		ttm_bo_get(&ref->bo);
+		*pnvbo = nouveau_bo(&ref->bo);
+	} else {
+		*pnvbo = NULL;
 	}
+	if (prev)
+		ttm_bo_put(&prev->bo);
 
 	return 0;
 }
......
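One design note on the nouveau_bo_ref() rewrite above: the get on ref->bo happens before the put on prev, which keeps the sequence safe even when both arguments name the same object:

	/* Hypothetical hazard the get-before-put order avoids:
	 *
	 *   nouveau_bo_ref(nvbo, &nvbo);   // ref == prev == nvbo
	 *
	 * A put-first ordering could drop the last reference and free the BO
	 * before the get runs; get-first keeps the refcount >= 1 throughout.
	 */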
@@ -41,7 +41,6 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
 {
 	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
-	struct ttm_buffer_object *bo = &nvbo->bo;
 	struct device *dev = drm->dev->dev;
 	int ret;
 
@@ -56,7 +55,7 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
 	/* reset filp so nouveau_bo_del_ttm() can test for it */
 	gem->filp = NULL;
-	ttm_bo_unref(&bo);
+	ttm_bo_put(&nvbo->bo);
 
 	pm_runtime_mark_last_busy(dev);
 	pm_runtime_put_autosuspend(dev);
......
@@ -679,15 +679,6 @@ void ttm_bo_put(struct ttm_buffer_object *bo)
 }
 EXPORT_SYMBOL(ttm_bo_put);
 
-void ttm_bo_unref(struct ttm_buffer_object **p_bo)
-{
-	struct ttm_buffer_object *bo = *p_bo;
-
-	*p_bo = NULL;
-	ttm_bo_put(bo);
-}
-EXPORT_SYMBOL(ttm_bo_unref);
-
 int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
 {
 	return cancel_delayed_work_sync(&bdev->wq);
......
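With ttm_bo_unref() removed, remaining callers open-code its body, exactly as the driver hunks in this merge do; a minimal migration sketch (semantics taken from the removed functions above):

	/* old (deprecated):              new (open-coded equivalent):
	 *
	 *   ttm_bo_unref(&bo);             ttm_bo_put(bo);
	 *                                  bo = NULL;
	 *
	 *   tmp = ttm_bo_reference(bo);    ttm_bo_get(bo);
	 *                                  tmp = bo;
	 */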
@@ -534,7 +534,6 @@ static void vmw_user_bo_release(struct ttm_base_object **p_base)
 {
 	struct vmw_user_buffer_object *vmw_user_bo;
 	struct ttm_base_object *base = *p_base;
-	struct ttm_buffer_object *bo;
 
 	*p_base = NULL;
 
@@ -543,8 +542,7 @@ static void vmw_user_bo_release(struct ttm_base_object **p_base)
 	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
 				   prime.base);
-	bo = &vmw_user_bo->vbo.base;
-	ttm_bo_unref(&bo);
+	ttm_bo_put(&vmw_user_bo->vbo.base);
 }
 
@@ -597,7 +595,6 @@ int vmw_user_bo_alloc(struct vmw_private *dev_priv,
 		      struct ttm_base_object **p_base)
 {
 	struct vmw_user_buffer_object *user_bo;
-	struct ttm_buffer_object *tmp;
 	int ret;
 
 	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
@@ -614,7 +611,7 @@ int vmw_user_bo_alloc(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0))
 		return ret;
 
-	tmp = ttm_bo_reference(&user_bo->vbo.base);
+	ttm_bo_get(&user_bo->vbo.base);
 	ret = ttm_prime_object_init(tfile,
 				    size,
 				    &user_bo->prime,
@@ -623,7 +620,7 @@ int vmw_user_bo_alloc(struct vmw_private *dev_priv,
 				    &vmw_user_bo_release,
 				    &vmw_user_bo_ref_obj_release);
 	if (unlikely(ret != 0)) {
-		ttm_bo_unref(&tmp);
+		ttm_bo_put(&user_bo->vbo.base);
 		goto out_no_base_object;
 	}
 
@@ -911,7 +908,7 @@ int vmw_user_bo_lookup(struct ttm_object_file *tfile,
 	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
 				   prime.base);
-	(void)ttm_bo_reference(&vmw_user_bo->vbo.base);
+	ttm_bo_get(&vmw_user_bo->vbo.base);
 	if (p_base)
 		*p_base = base;
 	else
......
@@ -1276,8 +1276,10 @@ int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
 	return 0;
 
 out_no_map:
-	if (man->using_mob)
-		ttm_bo_unref(&man->cmd_space);
+	if (man->using_mob) {
+		ttm_bo_put(man->cmd_space);
+		man->cmd_space = NULL;
+	}
 
 	return ret;
 }
@@ -1380,7 +1382,8 @@ void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
 	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
 	if (man->using_mob) {
 		(void) ttm_bo_kunmap(&man->map_obj);
-		ttm_bo_unref(&man->cmd_space);
+		ttm_bo_put(man->cmd_space);
+		man->cmd_space = NULL;
 	} else {
 		dma_free_coherent(&man->dev_priv->dev->pdev->dev,
 				  man->size, man->map, man->handle);
......
@@ -1337,18 +1337,15 @@ static inline void vmw_bo_unreference(struct vmw_buffer_object **buf)
 
 	*buf = NULL;
 	if (tmp_buf != NULL) {
-		struct ttm_buffer_object *bo = &tmp_buf->base;
-
-		ttm_bo_unref(&bo);
+		ttm_bo_put(&tmp_buf->base);
 	}
 }
 
 static inline struct vmw_buffer_object *
 vmw_bo_reference(struct vmw_buffer_object *buf)
 {
-	if (ttm_bo_reference(&buf->base))
-		return buf;
-	return NULL;
+	ttm_bo_get(&buf->base);
+	return buf;
 }
 
 static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
......
@@ -300,7 +300,8 @@ static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
 					       &batch->otables[i]);
 	}
 
-	ttm_bo_unref(&batch->otable_bo);
+	ttm_bo_put(batch->otable_bo);
+	batch->otable_bo = NULL;
 out_no_bo:
 	return ret;
 }
@@ -365,7 +366,8 @@ static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
 	vmw_bo_fence_single(bo, NULL);
 	ttm_bo_unreserve(bo);
 
-	ttm_bo_unref(&batch->otable_bo);
+	ttm_bo_put(batch->otable_bo);
+	batch->otable_bo = NULL;
 }
 
 /*
@@ -463,7 +465,8 @@ static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
 out_unreserve:
 	ttm_bo_unreserve(mob->pt_bo);
-	ttm_bo_unref(&mob->pt_bo);
+	ttm_bo_put(mob->pt_bo);
+	mob->pt_bo = NULL;
 
 	return ret;
 }
@@ -580,8 +583,10 @@ static void vmw_mob_pt_setup(struct vmw_mob *mob,
  */
 void vmw_mob_destroy(struct vmw_mob *mob)
 {
-	if (mob->pt_bo)
-		ttm_bo_unref(&mob->pt_bo);
+	if (mob->pt_bo) {
+		ttm_bo_put(mob->pt_bo);
+		mob->pt_bo = NULL;
+	}
 	kfree(mob);
 }
@@ -698,8 +703,10 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
 out_no_cmd_space:
 	vmw_fifo_resource_dec(dev_priv);
-	if (pt_set_up)
-		ttm_bo_unref(&mob->pt_bo);
+	if (pt_set_up) {
+		ttm_bo_put(mob->pt_bo);
+		mob->pt_bo = NULL;
+	}
 
 	return -ENOMEM;
 }
@@ -461,7 +461,8 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
 	}
 
 	INIT_LIST_HEAD(&val_list);
-	val_buf->bo = ttm_bo_reference(&res->backup->base);
+	ttm_bo_get(&res->backup->base);
+	val_buf->bo = &res->backup->base;
 	val_buf->num_shared = 0;
 	list_add_tail(&val_buf->head, &val_list);
 	ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
@@ -484,7 +485,8 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
 out_no_validate:
 	ttm_eu_backoff_reservation(ticket, &val_list);
 out_no_reserve:
-	ttm_bo_unref(&val_buf->bo);
+	ttm_bo_put(val_buf->bo);
+	val_buf->bo = NULL;
 	if (backup_dirty)
 		vmw_bo_unreference(&res->backup);
 
@@ -544,7 +546,8 @@ vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
 	INIT_LIST_HEAD(&val_list);
 	list_add_tail(&val_buf->head, &val_list);
 	ttm_eu_backoff_reservation(ticket, &val_list);
-	ttm_bo_unref(&val_buf->bo);
+	ttm_bo_put(val_buf->bo);
+	val_buf->bo = NULL;
 }
 
 /**
......
@@ -628,8 +628,10 @@ void vmw_validation_unref_lists(struct vmw_validation_context *ctx)
 	struct vmw_validation_bo_node *entry;
 	struct vmw_validation_res_node *val;
 
-	list_for_each_entry(entry, &ctx->bo_list, base.head)
-		ttm_bo_unref(&entry->base.bo);
+	list_for_each_entry(entry, &ctx->bo_list, base.head) {
+		ttm_bo_put(entry->base.bo);
+		entry->base.bo = NULL;
+	}
 
 	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
 	list_for_each_entry(val, &ctx->resource_list, head)
......
@@ -295,23 +295,6 @@ static inline void ttm_bo_get(struct ttm_buffer_object *bo)
 	kref_get(&bo->kref);
 }
 
-/**
- * ttm_bo_reference - reference a struct ttm_buffer_object
- *
- * @bo: The buffer object.
- *
- * Returns a refcounted pointer to a buffer object.
- *
- * This function is deprecated. Use @ttm_bo_get instead.
- */
-static inline struct ttm_buffer_object *
-ttm_bo_reference(struct ttm_buffer_object *bo)
-{
-	ttm_bo_get(bo);
-	return bo;
-}
-
 /**
  * ttm_bo_get_unless_zero - reference a struct ttm_buffer_object unless
  * its refcount has already reached zero.
@@ -386,17 +369,6 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
  */
 void ttm_bo_put(struct ttm_buffer_object *bo);
 
-/**
- * ttm_bo_unref
- *
- * @bo: The buffer object.
- *
- * Unreference and clear a pointer to a buffer object.
- *
- * This function is deprecated. Use @ttm_bo_put instead.
- */
-void ttm_bo_unref(struct ttm_buffer_object **bo);
-
 /**
  * ttm_bo_add_to_lru
  *
......