Commit fd3e14ff authored by Dave Airlie's avatar Dave Airlie

Merge branch 'drm-next-4.5' of git://people.freedesktop.org/~agd5f/linux into drm-next

[airlied: fixup build problems on arm - added errno.h include]
* 'drm-next-4.5' of git://people.freedesktop.org/~agd5f/linux: (152 commits)
  amd/powerplay: fix copy paste typo in hardwaremanager.c
  amd/powerplay: disable powerplay by default initially
  amd/powerplay: don't enable ucode fan control if vbios has no fan table
  drm/amd/powerplay: show gpu load when print gpu performance for Cz. (v2)
  drm/amd/powerplay: check whether need to enable thermal control. (v2)
  drm/amd/powerplay: add point check to avoid NULL point hang.
  drm/amdgpu/powerplay: Program a calculated value as Deep Sleep clock.
  drm/amd/powerplay: Don't return an error if fan table is missing
  drm/powerplay/hwmgr: log errors in tonga_hwmgr_backend_init
  drm/powerplay: add debugging output to processpptables.c
  drm/powerplay: add debugging output to tonga_processpptables.c
  amd/powerplay: Add structures required to report configuration change
  amd/powerplay: Fix get dal power level
  amd\powerplay Implement get dal power level
  drm/amd/powerplay: display gpu load when print performance for tonga.
  drm/amdgpu/powerplay: enable sysfs and debugfs interfaces late
  drm/amd/powerplay: move shared function of vi to hwmgr. (v2)
  drm/amd/powerplay: check whether enable dpm in powerplay.
  drm/amd/powerplay: fix bug that dpm funcs in debugfs/sysfs missing.
  drm/amd/powerplay: fix boolreturn.cocci warnings
  ...
parents 91161995 eafbbd98
......@@ -160,6 +160,7 @@ config DRM_AMDGPU
If M is selected, the module will be called amdgpu.
source "drivers/gpu/drm/amd/amdgpu/Kconfig"
source "drivers/gpu/drm/amd/powerplay/Kconfig"
source "drivers/gpu/drm/nouveau/Kconfig"
......
......@@ -2,10 +2,13 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/amd/include/asic_reg \
-Idrivers/gpu/drm/amd/include \
-Idrivers/gpu/drm/amd/amdgpu \
-Idrivers/gpu/drm/amd/scheduler
FULL_AMD_PATH=$(src)/..
ccflags-y := -Iinclude/drm -I$(FULL_AMD_PATH)/include/asic_reg \
-I$(FULL_AMD_PATH)/include \
-I$(FULL_AMD_PATH)/amdgpu \
-I$(FULL_AMD_PATH)/scheduler \
-I$(FULL_AMD_PATH)/powerplay/inc
amdgpu-y := amdgpu_drv.o
......@@ -44,6 +47,7 @@ amdgpu-y += \
# add SMC block
amdgpu-y += \
amdgpu_dpm.o \
amdgpu_powerplay.o \
cz_smc.o cz_dpm.o \
tonga_smc.o tonga_dpm.o \
fiji_smc.o fiji_dpm.o \
......@@ -94,6 +98,14 @@ amdgpu-$(CONFIG_VGA_SWITCHEROO) += amdgpu_atpx_handler.o
amdgpu-$(CONFIG_ACPI) += amdgpu_acpi.o
amdgpu-$(CONFIG_MMU_NOTIFIER) += amdgpu_mn.o
ifneq ($(CONFIG_DRM_AMD_POWERPLAY),)
include $(FULL_AMD_PATH)/powerplay/Makefile
amdgpu-y += $(AMD_POWERPLAY_FILES)
endif
obj-$(CONFIG_DRM_AMDGPU)+= amdgpu.o
CFLAGS_amdgpu_trace_points.o := -I$(src)
......@@ -29,66 +29,10 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include "amdgpu.h"
#include "amdgpu_acpi.h"
#include "amd_acpi.h"
#include "atom.h"
#define ACPI_AC_CLASS "ac_adapter"
extern void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev);
struct atif_verify_interface {
u16 size; /* structure size in bytes (includes size field) */
u16 version; /* version */
u32 notification_mask; /* supported notifications mask */
u32 function_bits; /* supported functions bit vector */
} __packed;
struct atif_system_params {
u16 size; /* structure size in bytes (includes size field) */
u32 valid_mask; /* valid flags mask */
u32 flags; /* flags */
u8 command_code; /* notify command code */
} __packed;
struct atif_sbios_requests {
u16 size; /* structure size in bytes (includes size field) */
u32 pending; /* pending sbios requests */
u8 panel_exp_mode; /* panel expansion mode */
u8 thermal_gfx; /* thermal state: target gfx controller */
u8 thermal_state; /* thermal state: state id (0: exit state, non-0: state) */
u8 forced_power_gfx; /* forced power state: target gfx controller */
u8 forced_power_state; /* forced power state: state id */
u8 system_power_src; /* system power source */
u8 backlight_level; /* panel backlight level (0-255) */
} __packed;
#define ATIF_NOTIFY_MASK 0x3
#define ATIF_NOTIFY_NONE 0
#define ATIF_NOTIFY_81 1
#define ATIF_NOTIFY_N 2
struct atcs_verify_interface {
u16 size; /* structure size in bytes (includes size field) */
u16 version; /* version */
u32 function_bits; /* supported functions bit vector */
} __packed;
#define ATCS_VALID_FLAGS_MASK 0x3
struct atcs_pref_req_input {
u16 size; /* structure size in bytes (includes size field) */
u16 client_id; /* client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num) */
u16 valid_flags_mask; /* valid flags mask */
u16 flags; /* flags */
u8 req_type; /* request type */
u8 perf_req; /* performance request */
} __packed;
struct atcs_pref_req_output {
u16 size; /* structure size in bytes (includes size field) */
u8 ret_val; /* return value */
} __packed;
/* Call the ATIF method
*/
/**
......
......@@ -11,7 +11,7 @@
#include <linux/acpi.h>
#include <linux/pci.h>
#include "amdgpu_acpi.h"
#include "amd_acpi.h"
struct amdgpu_atpx_functions {
bool px_params;
......
......@@ -35,6 +35,13 @@
* BIOS.
*/
#define AMD_VBIOS_SIGNATURE " 761295520"
#define AMD_VBIOS_SIGNATURE_OFFSET 0x30
#define AMD_VBIOS_SIGNATURE_SIZE sizeof(AMD_VBIOS_SIGNATURE)
#define AMD_VBIOS_SIGNATURE_END (AMD_VBIOS_SIGNATURE_OFFSET + AMD_VBIOS_SIGNATURE_SIZE)
#define AMD_IS_VALID_VBIOS(p) ((p)[0] == 0x55 && (p)[1] == 0xAA)
#define AMD_VBIOS_LENGTH(p) ((p)[2] << 9)
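For context: these macros encode the standard PCI option-ROM layout — bytes 0 and 1 carry the 0x55AA ROM signature, byte 2 the image length in 512-byte units (hence the shift by 9), and the " 761295520" ATI signature string sits at offset 0x30. A minimal standalone validator built only from the new macros (hypothetical helper, not part of this patch):
	static bool vbios_header_ok(const u8 *p, size_t len)
	{
		if (len < AMD_VBIOS_SIGNATURE_END)
			return false;
		if (!AMD_IS_VALID_VBIOS(p))	/* 0x55 0xAA ROM signature */
			return false;
		/* ATI signature string at offset 0x30 */
		return memcmp(&p[AMD_VBIOS_SIGNATURE_OFFSET], AMD_VBIOS_SIGNATURE,
			      strlen(AMD_VBIOS_SIGNATURE)) == 0;
	}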
/* If you boot an IGP board with a discrete card as the primary,
* the IGP rom is not accessible via the rom bar as the IGP rom is
* part of the system bios. On boot, the system bios puts a
......@@ -58,7 +65,7 @@ static bool igp_read_bios_from_vram(struct amdgpu_device *adev)
return false;
}
if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
if (size == 0 || !AMD_IS_VALID_VBIOS(bios)) {
iounmap(bios);
return false;
}
......@@ -74,7 +81,7 @@ static bool igp_read_bios_from_vram(struct amdgpu_device *adev)
bool amdgpu_read_bios(struct amdgpu_device *adev)
{
uint8_t __iomem *bios, val1, val2;
uint8_t __iomem *bios, val[2];
size_t size;
adev->bios = NULL;
......@@ -84,10 +91,10 @@ bool amdgpu_read_bios(struct amdgpu_device *adev)
return false;
}
val1 = readb(&bios[0]);
val2 = readb(&bios[1]);
val[0] = readb(&bios[0]);
val[1] = readb(&bios[1]);
if (size == 0 || val1 != 0x55 || val2 != 0xaa) {
if (size == 0 || !AMD_IS_VALID_VBIOS(val)) {
pci_unmap_rom(adev->pdev, bios);
return false;
}
......@@ -101,6 +108,38 @@ bool amdgpu_read_bios(struct amdgpu_device *adev)
return true;
}
static bool amdgpu_read_bios_from_rom(struct amdgpu_device *adev)
{
u8 header[AMD_VBIOS_SIGNATURE_END+1] = {0};
int len;
if (!adev->asic_funcs->read_bios_from_rom)
return false;
/* validate VBIOS signature */
if (amdgpu_asic_read_bios_from_rom(adev, &header[0], sizeof(header)) == false)
return false;
header[AMD_VBIOS_SIGNATURE_END] = 0;
if ((!AMD_IS_VALID_VBIOS(header)) ||
0 != memcmp((char *)&header[AMD_VBIOS_SIGNATURE_OFFSET],
AMD_VBIOS_SIGNATURE,
strlen(AMD_VBIOS_SIGNATURE)))
return false;
/* valid vbios, go on */
len = AMD_VBIOS_LENGTH(header);
len = ALIGN(len, 4);
adev->bios = kmalloc(len, GFP_KERNEL);
if (!adev->bios) {
DRM_ERROR("no memory to allocate for BIOS\n");
return false;
}
/* read complete BIOS */
return amdgpu_asic_read_bios_from_rom(adev, adev->bios, len);
}
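The new helper reads in two passes: a fixed-size header first, to validate both signatures and learn the real image size, then the whole image. The length byte is in 512-byte units, so, with a hypothetical header value, the size math works out as:
	/* header[2] == 0x80  ->  len = 0x80 << 9 = 65536 bytes (64 KiB);
	 * ALIGN(len, 4) then rounds the allocation up to whole dwords,
	 * matching the dword-granular reads the ASIC callback performs */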
static bool amdgpu_read_platform_bios(struct amdgpu_device *adev)
{
uint8_t __iomem *bios;
......@@ -113,7 +152,7 @@ static bool amdgpu_read_platform_bios(struct amdgpu_device *adev)
return false;
}
if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
if (size == 0 || !AMD_IS_VALID_VBIOS(bios)) {
return false;
}
adev->bios = kmemdup(bios, size, GFP_KERNEL);
......@@ -230,7 +269,7 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
break;
}
if (i == 0 || adev->bios[0] != 0x55 || adev->bios[1] != 0xaa) {
if (i == 0 || !AMD_IS_VALID_VBIOS(adev->bios)) {
kfree(adev->bios);
return false;
}
......@@ -319,6 +358,9 @@ bool amdgpu_get_bios(struct amdgpu_device *adev)
r = igp_read_bios_from_vram(adev);
if (r == false)
r = amdgpu_read_bios(adev);
if (r == false) {
r = amdgpu_read_bios_from_rom(adev);
}
if (r == false) {
r = amdgpu_read_disabled_bios(adev);
}
......@@ -330,7 +372,7 @@ bool amdgpu_get_bios(struct amdgpu_device *adev)
adev->bios = NULL;
return false;
}
if (adev->bios[0] != 0x55 || adev->bios[1] != 0xaa) {
if (!AMD_IS_VALID_VBIOS(adev->bios)) {
printk("BIOS signature incorrect %x %x\n", adev->bios[0], adev->bios[1]);
goto free_bios;
}
......
......@@ -24,6 +24,7 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <drm/drmP.h>
#include <linux/firmware.h>
#include <drm/amdgpu_drm.h>
......@@ -32,7 +33,6 @@
#include "atom.h"
#include "amdgpu_ucode.h"
struct amdgpu_cgs_device {
struct cgs_device base;
struct amdgpu_device *adev;
......@@ -703,6 +703,9 @@ static int amdgpu_cgs_get_firmware_info(void *cgs_device,
case CHIP_TONGA:
strcpy(fw_name, "amdgpu/tonga_smc.bin");
break;
case CHIP_FIJI:
strcpy(fw_name, "amdgpu/fiji_smc.bin");
break;
default:
DRM_ERROR("SMC firmware not supported\n");
return -EINVAL;
......@@ -736,6 +739,288 @@ static int amdgpu_cgs_get_firmware_info(void *cgs_device,
return 0;
}
static int amdgpu_cgs_query_system_info(void *cgs_device,
struct cgs_system_info *sys_info)
{
CGS_FUNC_ADEV;
if (NULL == sys_info)
return -ENODEV;
if (sizeof(struct cgs_system_info) != sys_info->size)
return -ENODEV;
switch (sys_info->info_id) {
case CGS_SYSTEM_INFO_ADAPTER_BDF_ID:
sys_info->value = adev->pdev->devfn | (adev->pdev->bus->number << 8);
break;
case CGS_SYSTEM_INFO_PCIE_GEN_INFO:
sys_info->value = adev->pm.pcie_gen_mask;
break;
case CGS_SYSTEM_INFO_PCIE_MLW:
sys_info->value = adev->pm.pcie_mlw_mask;
break;
default:
return -ENODEV;
}
return 0;
}
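The query contract is explicit: the caller fills in sys_info->size (checked against sizeof(struct cgs_system_info)) and an info_id, then reads the answer back from sys_info->value. A hedged sketch of a consumer (shown against the function directly for brevity; real users go through the cgs_ops table wired up below):
	struct cgs_system_info sys_info = {0};
	uint32_t pcie_gen_mask;

	sys_info.size = sizeof(struct cgs_system_info);
	sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
	if (amdgpu_cgs_query_system_info(cgs_device, &sys_info) == 0)
		pcie_gen_mask = (uint32_t)sys_info.value;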
static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
struct cgs_display_info *info)
{
CGS_FUNC_ADEV;
struct amdgpu_crtc *amdgpu_crtc;
struct drm_device *ddev = adev->ddev;
struct drm_crtc *crtc;
uint32_t line_time_us, vblank_lines;
if (info == NULL)
return -EINVAL;
if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
list_for_each_entry(crtc,
&ddev->mode_config.crtc_list, head) {
amdgpu_crtc = to_amdgpu_crtc(crtc);
if (crtc->enabled) {
info->active_display_mask |= (1 << amdgpu_crtc->crtc_id);
info->display_count++;
}
if (info->mode_info != NULL &&
crtc->enabled && amdgpu_crtc->enabled &&
amdgpu_crtc->hw_mode.clock) {
line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
amdgpu_crtc->hw_mode.clock;
vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
amdgpu_crtc->hw_mode.crtc_vdisplay +
(amdgpu_crtc->v_border * 2);
info->mode_info->vblank_time_us = vblank_lines * line_time_us;
info->mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
info->mode_info->ref_clock = adev->clock.spll.reference_freq;
info->mode_info++;
}
}
}
return 0;
}
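To make the vblank math concrete, a worked pass for a standard 1080p60 CEA mode (numbers hypothetical, taken from the usual timings):
	/* clock = 148500 kHz, htotal = 2200, vblank_end = 1125,
	 * vdisplay = 1080, v_border = 0 */
	line_time_us   = (2200 * 1000) / 148500;	/* = 14 us (integer math) */
	vblank_lines   = 1125 - 1080 + (0 * 2);		/* = 45 lines */
	vblank_time_us = 45 * 14;			/* = 630 us */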
/** \brief evaluate acpi namespace object, handle or pathname must be valid
* \param cgs_device
* \param info input/output arguments for the control method
* \return status
*/
#if defined(CONFIG_ACPI)
static int amdgpu_cgs_acpi_eval_object(void *cgs_device,
struct cgs_acpi_method_info *info)
{
CGS_FUNC_ADEV;
acpi_handle handle;
struct acpi_object_list input;
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *params = NULL;
union acpi_object *obj = NULL;
uint8_t name[5] = {'\0'};
struct cgs_acpi_method_argument *argument = NULL;
uint32_t i, count;
acpi_status status;
int result;
uint32_t func_no = 0xFFFFFFFF;
handle = ACPI_HANDLE(&adev->pdev->dev);
if (!handle)
return -ENODEV;
memset(&input, 0, sizeof(struct acpi_object_list));
/* validate input info */
if (info->size != sizeof(struct cgs_acpi_method_info))
return -EINVAL;
input.count = info->input_count;
if (info->input_count > 0) {
if (info->pinput_argument == NULL)
return -EINVAL;
argument = info->pinput_argument;
func_no = argument->value;
for (i = 0; i < info->input_count; i++) {
if (((argument->type == ACPI_TYPE_STRING) ||
(argument->type == ACPI_TYPE_BUFFER))
&& (argument->pointer == NULL))
return -EINVAL;
argument++;
}
}
if (info->output_count > 0) {
if (info->poutput_argument == NULL)
return -EINVAL;
argument = info->poutput_argument;
for (i = 0; i < info->output_count; i++) {
if (((argument->type == ACPI_TYPE_STRING) ||
(argument->type == ACPI_TYPE_BUFFER))
&& (argument->pointer == NULL))
return -EINVAL;
argument++;
}
}
/* The path name passed to acpi_evaluate_object should be null terminated */
if ((info->field & CGS_ACPI_FIELD_METHOD_NAME) != 0) {
strncpy(name, (char *)&(info->name), sizeof(uint32_t));
name[4] = '\0';
}
/* parse input parameters */
if (input.count > 0) {
input.pointer = params =
kzalloc(sizeof(union acpi_object) * input.count, GFP_KERNEL);
if (params == NULL)
return -EINVAL;
argument = info->pinput_argument;
for (i = 0; i < input.count; i++) {
params->type = argument->type;
switch (params->type) {
case ACPI_TYPE_INTEGER:
params->integer.value = argument->value;
break;
case ACPI_TYPE_STRING:
params->string.length = argument->method_length;
params->string.pointer = argument->pointer;
break;
case ACPI_TYPE_BUFFER:
params->buffer.length = argument->method_length;
params->buffer.pointer = argument->pointer;
break;
default:
break;
}
params++;
argument++;
}
}
/* parse output info */
count = info->output_count;
argument = info->poutput_argument;
/* evaluate the acpi method */
status = acpi_evaluate_object(handle, name, &input, &output);
if (ACPI_FAILURE(status)) {
result = -EIO;
goto error;
}
/* return the output info */
obj = output.pointer;
if (count > 1) {
if ((obj->type != ACPI_TYPE_PACKAGE) ||
(obj->package.count != count)) {
result = -EIO;
goto error;
}
params = obj->package.elements;
} else
params = obj;
if (params == NULL) {
result = -EIO;
goto error;
}
for (i = 0; i < count; i++) {
if (argument->type != params->type) {
result = -EIO;
goto error;
}
switch (params->type) {
case ACPI_TYPE_INTEGER:
argument->value = params->integer.value;
break;
case ACPI_TYPE_STRING:
if ((params->string.length != argument->data_length) ||
(params->string.pointer == NULL)) {
result = -EIO;
goto error;
}
strncpy(argument->pointer,
params->string.pointer,
params->string.length);
break;
case ACPI_TYPE_BUFFER:
if (params->buffer.pointer == NULL) {
result = -EIO;
goto error;
}
memcpy(argument->pointer,
params->buffer.pointer,
argument->data_length);
break;
default:
break;
}
argument++;
params++;
}
error:
if (obj != NULL)
kfree(obj);
kfree((void *)input.pointer);
return result;
}
#else
static int amdgpu_cgs_acpi_eval_object(void *cgs_device,
struct cgs_acpi_method_info *info)
{
return -EIO;
}
#endif
int amdgpu_cgs_call_acpi_method(void *cgs_device,
uint32_t acpi_method,
uint32_t acpi_function,
void *pinput, void *poutput,
uint32_t output_count,
uint32_t input_size,
uint32_t output_size)
{
struct cgs_acpi_method_argument acpi_input[2] = { {0}, {0} };
struct cgs_acpi_method_argument acpi_output = {0};
struct cgs_acpi_method_info info = {0};
acpi_input[0].type = CGS_ACPI_TYPE_INTEGER;
acpi_input[0].method_length = sizeof(uint32_t);
acpi_input[0].data_length = sizeof(uint32_t);
acpi_input[0].value = acpi_function;
acpi_input[1].type = CGS_ACPI_TYPE_BUFFER;
acpi_input[1].method_length = CGS_ACPI_MAX_BUFFER_SIZE;
acpi_input[1].data_length = input_size;
acpi_input[1].pointer = pinput;
acpi_output.type = CGS_ACPI_TYPE_BUFFER;
acpi_output.method_length = CGS_ACPI_MAX_BUFFER_SIZE;
acpi_output.data_length = output_size;
acpi_output.pointer = poutput;
info.size = sizeof(struct cgs_acpi_method_info);
info.field = CGS_ACPI_FIELD_METHOD_NAME | CGS_ACPI_FIELD_INPUT_ARGUMENT_COUNT;
info.input_count = 2;
info.name = acpi_method;
info.pinput_argument = acpi_input;
info.output_count = output_count;
info.poutput_argument = &acpi_output;
return amdgpu_cgs_acpi_eval_object(cgs_device, &info);
}
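amdgpu_cgs_call_acpi_method() packages the common "integer function code plus one input buffer in, one buffer out" ACPI calling convention; the 4-character method name travels packed into a uint32_t and is unpacked by the strncpy() in the evaluator above. A hedged usage sketch (method and sizes hypothetical; on little-endian, 0x53435441 unpacks to "ATCS"):
	u32 in_buf[4] = {0}, out_buf[4] = {0};
	int ret;

	ret = amdgpu_cgs_call_acpi_method(cgs_device,
					  0x53435441,	/* "ATCS" */
					  2,		/* acpi_function */
					  in_buf, out_buf,
					  1,		/* output_count */
					  sizeof(in_buf), sizeof(out_buf));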
static const struct cgs_ops amdgpu_cgs_ops = {
amdgpu_cgs_gpu_mem_info,
amdgpu_cgs_gmap_kmem,
......@@ -768,7 +1053,10 @@ static const struct cgs_ops amdgpu_cgs_ops = {
amdgpu_cgs_set_camera_voltages,
amdgpu_cgs_get_firmware_info,
amdgpu_cgs_set_powergating_state,
amdgpu_cgs_set_clockgating_state
amdgpu_cgs_set_clockgating_state,
amdgpu_cgs_get_active_displays_info,
amdgpu_cgs_call_acpi_method,
amdgpu_cgs_query_system_info,
};
static const struct cgs_os_ops amdgpu_cgs_os_ops = {
......
......@@ -388,17 +388,18 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p)
amdgpu_cs_buckets_get_list(&buckets, &p->validated);
}
p->vm_bos = amdgpu_vm_get_bos(p->adev, &fpriv->vm,
&p->validated);
INIT_LIST_HEAD(&duplicates);
amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
if (need_mmap_lock)
down_read(&current->mm->mmap_sem);
INIT_LIST_HEAD(&duplicates);
r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates);
if (unlikely(r != 0))
goto error_reserve;
amdgpu_vm_get_pt_bos(&fpriv->vm, &duplicates);
r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &p->validated);
if (r)
goto error_validate;
......@@ -480,7 +481,6 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
if (parser->bo_list)
amdgpu_bo_list_put(parser->bo_list);
drm_free_large(parser->vm_bos);
for (i = 0; i < parser->nchunks; i++)
drm_free_large(parser->chunks[i].kdata);
kfree(parser->chunks);
......
......@@ -25,7 +25,7 @@
#include <drm/drmP.h>
#include "amdgpu.h"
int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
int amdgpu_ctx_init(struct amdgpu_device *adev, enum amd_sched_priority pri,
struct amdgpu_ctx *ctx)
{
unsigned i, j;
......@@ -35,17 +35,25 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
ctx->adev = adev;
kref_init(&ctx->refcount);
spin_lock_init(&ctx->ring_lock);
for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
ctx->rings[i].sequence = 1;
ctx->fences = kzalloc(sizeof(struct fence *) * amdgpu_sched_jobs *
AMDGPU_MAX_RINGS, GFP_KERNEL);
if (!ctx->fences)
return -ENOMEM;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
ctx->rings[i].sequence = 1;
ctx->rings[i].fences = (void *)ctx->fences + sizeof(struct fence *) *
amdgpu_sched_jobs * i;
}
if (amdgpu_enable_scheduler) {
/* create context entity for each ring */
for (i = 0; i < adev->num_rings; i++) {
struct amd_sched_rq *rq;
if (kernel)
rq = &adev->rings[i]->sched.kernel_rq;
else
rq = &adev->rings[i]->sched.sched_rq;
if (pri >= AMD_SCHED_MAX_PRIORITY) {
kfree(ctx->fences);
return -EINVAL;
}
rq = &adev->rings[i]->sched.sched_rq[pri];
r = amd_sched_entity_init(&adev->rings[i]->sched,
&ctx->rings[i].entity,
rq, amdgpu_sched_jobs);
......@@ -57,7 +65,7 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
for (j = 0; j < i; j++)
amd_sched_entity_fini(&adev->rings[j]->sched,
&ctx->rings[j].entity);
kfree(ctx);
kfree(ctx->fences);
return r;
}
}
......@@ -73,8 +81,9 @@ void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
return;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
for (j = 0; j < AMDGPU_CTX_MAX_CS_PENDING; ++j)
for (j = 0; j < amdgpu_sched_jobs; ++j)
fence_put(ctx->rings[i].fences[j]);
kfree(ctx->fences);
if (amdgpu_enable_scheduler) {
for (i = 0; i < adev->num_rings; i++)
......@@ -103,9 +112,13 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
return r;
}
*id = (uint32_t)r;
r = amdgpu_ctx_init(adev, false, ctx);
r = amdgpu_ctx_init(adev, AMD_SCHED_PRIORITY_NORMAL, ctx);
if (r) {
idr_remove(&mgr->ctx_handles, *id);
*id = 0;
kfree(ctx);
}
mutex_unlock(&mgr->lock);
return r;
}
......@@ -239,7 +252,7 @@ uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
unsigned idx = 0;
struct fence *other = NULL;
idx = seq % AMDGPU_CTX_MAX_CS_PENDING;
idx = seq & (amdgpu_sched_jobs - 1);
other = cring->fences[idx];
if (other) {
signed long r;
......@@ -274,12 +287,12 @@ struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
}
if (seq + AMDGPU_CTX_MAX_CS_PENDING < cring->sequence) {
if (seq + amdgpu_sched_jobs < cring->sequence) {
spin_unlock(&ctx->ring_lock);
return NULL;
}
fence = fence_get(cring->fences[seq % AMDGPU_CTX_MAX_CS_PENDING]);
fence = fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]);
spin_unlock(&ctx->ring_lock);
return fence;
......
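The per-ring fence history is now sized by amdgpu_sched_jobs instead of the fixed AMDGPU_CTX_MAX_CS_PENDING, with all rings sharing one kzalloc'd array sliced per ring, and the slot index computed as seq & (amdgpu_sched_jobs - 1). That mask only equals seq % amdgpu_sched_jobs when the count is a power of two, which is precisely the invariant amdgpu_check_arguments() enforces below with roundup_pow_of_two(). A minimal sketch of the invariant:
	unsigned jobs = 32;	/* amdgpu_sched_jobs, forced to a power of two */
	uint64_t seq = 100;

	/* 100 & 31 == 100 % 32 == 4; only true for power-of-two sizes */
	WARN_ON((seq & (jobs - 1)) != (seq % jobs));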
......@@ -38,6 +38,7 @@
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
......@@ -949,6 +950,15 @@ static bool amdgpu_check_pot_argument(int arg)
*/
static void amdgpu_check_arguments(struct amdgpu_device *adev)
{
if (amdgpu_sched_jobs < 4) {
dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
amdgpu_sched_jobs);
amdgpu_sched_jobs = 4;
} else if (!amdgpu_check_pot_argument(amdgpu_sched_jobs)) {
dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
amdgpu_sched_jobs);
amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
}
/* vramlimit must be a power of two */
if (!amdgpu_check_pot_argument(amdgpu_vram_limit)) {
dev_warn(adev->dev, "vram limit (%d) must be a power of 2\n",
......@@ -1214,12 +1224,14 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
} else {
if (adev->ip_blocks[i].funcs->early_init) {
r = adev->ip_blocks[i].funcs->early_init((void *)adev);
if (r == -ENOENT)
if (r == -ENOENT) {
adev->ip_block_status[i].valid = false;
else if (r)
} else if (r) {
DRM_ERROR("early_init %d failed %d\n", i, r);
return r;
else
} else {
adev->ip_block_status[i].valid = true;
}
} else {
adev->ip_block_status[i].valid = true;
}
......@@ -1237,20 +1249,28 @@ static int amdgpu_init(struct amdgpu_device *adev)
if (!adev->ip_block_status[i].valid)
continue;
r = adev->ip_blocks[i].funcs->sw_init((void *)adev);
if (r)
if (r) {
DRM_ERROR("sw_init %d failed %d\n", i, r);
return r;
}
adev->ip_block_status[i].sw = true;
/* need to do gmc hw init early so we can allocate gpu mem */
if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
r = amdgpu_vram_scratch_init(adev);
if (r)
if (r) {
DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
return r;
}
r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
if (r)
if (r) {
DRM_ERROR("hw_init %d failed %d\n", i, r);
return r;
}
r = amdgpu_wb_init(adev);
if (r)
if (r) {
DRM_ERROR("amdgpu_wb_init failed %d\n", r);
return r;
}
adev->ip_block_status[i].hw = true;
}
}
......@@ -1262,8 +1282,10 @@ static int amdgpu_init(struct amdgpu_device *adev)
if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC)
continue;
r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
if (r)
if (r) {
DRM_ERROR("hw_init %d failed %d\n", i, r);
return r;
}
adev->ip_block_status[i].hw = true;
}
......@@ -1280,14 +1302,18 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
/* enable clockgating to save power */
r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
AMD_CG_STATE_GATE);
if (r)
if (r) {
DRM_ERROR("set_clockgating_state(gate) %d failed %d\n", i, r);
return r;
}
if (adev->ip_blocks[i].funcs->late_init) {
r = adev->ip_blocks[i].funcs->late_init((void *)adev);
if (r)
if (r) {
DRM_ERROR("late_init %d failed %d\n", i, r);
return r;
}
}
}
return 0;
}
......@@ -1306,10 +1332,15 @@ static int amdgpu_fini(struct amdgpu_device *adev)
/* ungate blocks before hw fini so that we can shutdown the blocks safely */
r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
AMD_CG_STATE_UNGATE);
if (r)
if (r) {
DRM_ERROR("set_clockgating_state(ungate) %d failed %d\n", i, r);
return r;
}
r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
/* XXX handle errors */
if (r) {
DRM_DEBUG("hw_fini %d failed %d\n", i, r);
}
adev->ip_block_status[i].hw = false;
}
......@@ -1318,6 +1349,9 @@ static int amdgpu_fini(struct amdgpu_device *adev)
continue;
r = adev->ip_blocks[i].funcs->sw_fini((void *)adev);
/* XXX handle errors */
if (r) {
DRM_DEBUG("sw_fini %d failed %d\n", i, r);
}
adev->ip_block_status[i].sw = false;
adev->ip_block_status[i].valid = false;
}
......@@ -1335,9 +1369,15 @@ static int amdgpu_suspend(struct amdgpu_device *adev)
/* ungate blocks so that suspend can properly shut them down */
r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
AMD_CG_STATE_UNGATE);
if (r) {
DRM_ERROR("set_clockgating_state(ungate) %d failed %d\n", i, r);
}
/* XXX handle errors */
r = adev->ip_blocks[i].funcs->suspend(adev);
/* XXX handle errors */
if (r) {
DRM_ERROR("suspend %d failed %d\n", i, r);
}
}
return 0;
......@@ -1351,9 +1391,11 @@ static int amdgpu_resume(struct amdgpu_device *adev)
if (!adev->ip_block_status[i].valid)
continue;
r = adev->ip_blocks[i].funcs->resume(adev);
if (r)
if (r) {
DRM_ERROR("resume %d failed %d\n", i, r);
return r;
}
}
return 0;
}
......@@ -1484,8 +1526,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
return -EINVAL;
}
r = amdgpu_atombios_init(adev);
if (r)
if (r) {
dev_err(adev->dev, "amdgpu_atombios_init failed\n");
return r;
}
/* Post card if necessary */
if (!amdgpu_card_posted(adev)) {
......@@ -1499,21 +1543,26 @@ int amdgpu_device_init(struct amdgpu_device *adev,
/* Initialize clocks */
r = amdgpu_atombios_get_clock_info(adev);
if (r)
if (r) {
dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
return r;
}
/* init i2c buses */
amdgpu_atombios_i2c_init(adev);
/* Fence driver */
r = amdgpu_fence_driver_init(adev);
if (r)
if (r) {
dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
return r;
}
/* init the mode config */
drm_mode_config_init(adev->ddev);
r = amdgpu_init(adev);
if (r) {
dev_err(adev->dev, "amdgpu_init failed\n");
amdgpu_fini(adev);
return r;
}
......@@ -1528,7 +1577,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
return r;
}
r = amdgpu_ctx_init(adev, true, &adev->kernel_ctx);
r = amdgpu_ctx_init(adev, AMD_SCHED_PRIORITY_KERNEL, &adev->kernel_ctx);
if (r) {
dev_err(adev->dev, "failed to create kernel context (%d).\n", r);
return r;
......@@ -1570,8 +1619,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
* explicit gating rather than handling it automatically.
*/
r = amdgpu_late_init(adev);
if (r)
if (r) {
dev_err(adev->dev, "amdgpu_late_init failed\n");
return r;
}
return 0;
}
......@@ -1788,6 +1839,7 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
}
drm_kms_helper_poll_enable(dev);
drm_helper_hpd_irq_event(dev);
if (fbcon) {
amdgpu_fbdev_set_suspend(adev, 0);
......@@ -1881,6 +1933,83 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
return r;
}
void amdgpu_get_pcie_info(struct amdgpu_device *adev)
{
u32 mask;
int ret;
if (pci_is_root_bus(adev->pdev->bus))
return;
if (amdgpu_pcie_gen2 == 0)
return;
if (adev->flags & AMD_IS_APU)
return;
ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
if (!ret) {
adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
if (mask & DRM_PCIE_SPEED_25)
adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
if (mask & DRM_PCIE_SPEED_50)
adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
if (mask & DRM_PCIE_SPEED_80)
adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
}
ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
if (!ret) {
switch (mask) {
case 32:
adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
case 16:
adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
case 12:
adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
case 8:
adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
case 4:
adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
case 2:
adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
case 1:
adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
break;
default:
break;
}
}
}
/*
* Debugfs
......
......@@ -79,9 +79,10 @@ int amdgpu_vm_fault_stop = 0;
int amdgpu_vm_debug = 0;
int amdgpu_exp_hw_support = 0;
int amdgpu_enable_scheduler = 1;
int amdgpu_sched_jobs = 16;
int amdgpu_sched_jobs = 32;
int amdgpu_sched_hw_submission = 2;
int amdgpu_enable_semaphores = 0;
int amdgpu_powerplay = -1;
MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
......@@ -155,7 +156,7 @@ module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
MODULE_PARM_DESC(enable_scheduler, "enable SW GPU scheduler (1 = enable (default), 0 = disable)");
module_param_named(enable_scheduler, amdgpu_enable_scheduler, int, 0444);
MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 16)");
MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 32)");
module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);
MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)");
......@@ -164,6 +165,11 @@ module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
MODULE_PARM_DESC(enable_semaphores, "Enable semaphores (1 = enable, 0 = disable (default))");
module_param_named(enable_semaphores, amdgpu_enable_semaphores, int, 0644);
#ifdef CONFIG_DRM_AMD_POWERPLAY
MODULE_PARM_DESC(powerplay, "Powerplay component (1 = enable, 0 = disable, -1 = auto (default))");
module_param_named(powerplay, amdgpu_powerplay, int, 0444);
#endif
static struct pci_device_id pciidlist[] = {
#ifdef CONFIG_DRM_AMDGPU_CIK
/* Kaveri */
......
......@@ -263,7 +263,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
}
if (fb && ret) {
drm_gem_object_unreference(gobj);
drm_gem_object_unreference_unlocked(gobj);
drm_framebuffer_unregister_private(fb);
drm_framebuffer_cleanup(fb);
kfree(fb);
......
......@@ -448,7 +448,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va, uint32_t operation)
{
struct ttm_validate_buffer tv, *entry;
struct amdgpu_bo_list_entry *vm_bos;
struct amdgpu_bo_list_entry vm_pd;
struct ww_acquire_ctx ticket;
struct list_head list, duplicates;
unsigned domain;
......@@ -461,15 +461,14 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
tv.shared = true;
list_add(&tv.head, &list);
vm_bos = amdgpu_vm_get_bos(adev, bo_va->vm, &list);
if (!vm_bos)
return;
amdgpu_vm_get_pd_bo(bo_va->vm, &list, &vm_pd);
/* Provide duplicates to avoid -EALREADY */
r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
if (r)
goto error_free;
goto error_print;
amdgpu_vm_get_pt_bos(bo_va->vm, &duplicates);
list_for_each_entry(entry, &list, head) {
domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
/* if anything is swapped out don't swap it in here,
......@@ -491,9 +490,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
error_unreserve:
ttm_eu_backoff_reservation(&ticket, &list);
error_free:
drm_free_large(vm_bos);
error_print:
if (r && r != -ERESTARTSYS)
DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}
......
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "atom.h"
#include "amdgpu.h"
#include "amd_shared.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include "amdgpu_pm.h"
#include <drm/amdgpu_drm.h>
#include "amdgpu_powerplay.h"
#include "cik_dpm.h"
#include "vi_dpm.h"
static int amdgpu_powerplay_init(struct amdgpu_device *adev)
{
int ret = 0;
struct amd_powerplay *amd_pp;
amd_pp = &(adev->powerplay);
if (adev->pp_enabled) {
#ifdef CONFIG_DRM_AMD_POWERPLAY
struct amd_pp_init *pp_init;
pp_init = kzalloc(sizeof(struct amd_pp_init), GFP_KERNEL);
if (pp_init == NULL)
return -ENOMEM;
pp_init->chip_family = adev->family;
pp_init->chip_id = adev->asic_type;
pp_init->device = amdgpu_cgs_create_device(adev);
ret = amd_powerplay_init(pp_init, amd_pp);
kfree(pp_init);
#endif
} else {
amd_pp->pp_handle = (void *)adev;
switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_BONAIRE:
case CHIP_HAWAII:
amd_pp->ip_funcs = &ci_dpm_ip_funcs;
break;
case CHIP_KABINI:
case CHIP_MULLINS:
case CHIP_KAVERI:
amd_pp->ip_funcs = &kv_dpm_ip_funcs;
break;
#endif
case CHIP_TOPAZ:
amd_pp->ip_funcs = &iceland_dpm_ip_funcs;
break;
case CHIP_TONGA:
amd_pp->ip_funcs = &tonga_dpm_ip_funcs;
break;
case CHIP_FIJI:
amd_pp->ip_funcs = &fiji_dpm_ip_funcs;
break;
case CHIP_CARRIZO:
case CHIP_STONEY:
amd_pp->ip_funcs = &cz_dpm_ip_funcs;
break;
default:
ret = -EINVAL;
break;
}
}
return ret;
}
static int amdgpu_pp_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int ret = 0;
#ifdef CONFIG_DRM_AMD_POWERPLAY
switch (adev->asic_type) {
case CHIP_TONGA:
case CHIP_FIJI:
adev->pp_enabled = (amdgpu_powerplay > 0) ? true : false;
break;
default:
adev->pp_enabled = (amdgpu_powerplay > 0) ? true : false;
break;
}
#else
adev->pp_enabled = false;
#endif
ret = amdgpu_powerplay_init(adev);
if (ret)
return ret;
if (adev->powerplay.ip_funcs->early_init)
ret = adev->powerplay.ip_funcs->early_init(
adev->powerplay.pp_handle);
return ret;
}
static int amdgpu_pp_late_init(void *handle)
{
int ret = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->powerplay.ip_funcs->late_init)
ret = adev->powerplay.ip_funcs->late_init(
adev->powerplay.pp_handle);
#ifdef CONFIG_DRM_AMD_POWERPLAY
if (adev->pp_enabled)
amdgpu_pm_sysfs_init(adev);
#endif
return ret;
}
static int amdgpu_pp_sw_init(void *handle)
{
int ret = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->powerplay.ip_funcs->sw_init)
ret = adev->powerplay.ip_funcs->sw_init(
adev->powerplay.pp_handle);
#ifdef CONFIG_DRM_AMD_POWERPLAY
if (adev->pp_enabled) {
if (amdgpu_dpm == 0)
adev->pm.dpm_enabled = false;
else
adev->pm.dpm_enabled = true;
}
#endif
return ret;
}
static int amdgpu_pp_sw_fini(void *handle)
{
int ret = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->powerplay.ip_funcs->sw_fini)
ret = adev->powerplay.ip_funcs->sw_fini(
adev->powerplay.pp_handle);
if (ret)
return ret;
#ifdef CONFIG_DRM_AMD_POWERPLAY
if (adev->pp_enabled) {
amdgpu_pm_sysfs_fini(adev);
amd_powerplay_fini(adev->powerplay.pp_handle);
}
#endif
return ret;
}
static int amdgpu_pp_hw_init(void *handle)
{
int ret = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->pp_enabled && adev->firmware.smu_load)
amdgpu_ucode_init_bo(adev);
if (adev->powerplay.ip_funcs->hw_init)
ret = adev->powerplay.ip_funcs->hw_init(
adev->powerplay.pp_handle);
return ret;
}
static int amdgpu_pp_hw_fini(void *handle)
{
int ret = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->powerplay.ip_funcs->hw_fini)
ret = adev->powerplay.ip_funcs->hw_fini(
adev->powerplay.pp_handle);
if (adev->pp_enabled && adev->firmware.smu_load)
amdgpu_ucode_fini_bo(adev);
return ret;
}
static int amdgpu_pp_suspend(void *handle)
{
int ret = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->powerplay.ip_funcs->suspend)
ret = adev->powerplay.ip_funcs->suspend(
adev->powerplay.pp_handle);
return ret;
}
static int amdgpu_pp_resume(void *handle)
{
int ret = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->powerplay.ip_funcs->resume)
ret = adev->powerplay.ip_funcs->resume(
adev->powerplay.pp_handle);
return ret;
}
static int amdgpu_pp_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
int ret = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->powerplay.ip_funcs->set_clockgating_state)
ret = adev->powerplay.ip_funcs->set_clockgating_state(
adev->powerplay.pp_handle, state);
return ret;
}
static int amdgpu_pp_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
int ret = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->powerplay.ip_funcs->set_powergating_state)
ret = adev->powerplay.ip_funcs->set_powergating_state(
adev->powerplay.pp_handle, state);
return ret;
}
static bool amdgpu_pp_is_idle(void *handle)
{
bool ret = true;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->powerplay.ip_funcs->is_idle)
ret = adev->powerplay.ip_funcs->is_idle(
adev->powerplay.pp_handle);
return ret;
}
static int amdgpu_pp_wait_for_idle(void *handle)
{
int ret = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->powerplay.ip_funcs->wait_for_idle)
ret = adev->powerplay.ip_funcs->wait_for_idle(
adev->powerplay.pp_handle);
return ret;
}
static int amdgpu_pp_soft_reset(void *handle)
{
int ret = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->powerplay.ip_funcs->soft_reset)
ret = adev->powerplay.ip_funcs->soft_reset(
adev->powerplay.pp_handle);
return ret;
}
static void amdgpu_pp_print_status(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->powerplay.ip_funcs->print_status)
adev->powerplay.ip_funcs->print_status(
adev->powerplay.pp_handle);
}
const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
.early_init = amdgpu_pp_early_init,
.late_init = amdgpu_pp_late_init,
.sw_init = amdgpu_pp_sw_init,
.sw_fini = amdgpu_pp_sw_fini,
.hw_init = amdgpu_pp_hw_init,
.hw_fini = amdgpu_pp_hw_fini,
.suspend = amdgpu_pp_suspend,
.resume = amdgpu_pp_resume,
.is_idle = amdgpu_pp_is_idle,
.wait_for_idle = amdgpu_pp_wait_for_idle,
.soft_reset = amdgpu_pp_soft_reset,
.print_status = amdgpu_pp_print_status,
.set_clockgating_state = amdgpu_pp_set_clockgating_state,
.set_powergating_state = amdgpu_pp_set_powergating_state,
};
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __AMDGPU_POWERPLAY_H__
#define __AMDGPU_POWERPLAY_H__
#include "amd_shared.h"
extern const struct amd_ip_funcs amdgpu_pp_ip_funcs;
#endif /* __AMDGPU_POWERPLAY_H__ */
......@@ -75,50 +75,50 @@ static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev)
}
/**
* amdgpu_vm_get_bos - add the vm BOs to a validation list
* amdgpu_vm_get_pd_bo - add the VM PD to a validation list
*
* @vm: vm providing the BOs
* @head: head of validation list
* @validated: head of validation list
* @entry: entry to add
*
* Add the page directory to the list of BOs to
* validate for command submission (cayman+).
* validate for command submission.
*/
struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct list_head *head)
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
struct list_head *validated,
struct amdgpu_bo_list_entry *entry)
{
struct amdgpu_bo_list_entry *list;
unsigned i, idx;
entry->robj = vm->page_directory;
entry->prefered_domains = AMDGPU_GEM_DOMAIN_VRAM;
entry->allowed_domains = AMDGPU_GEM_DOMAIN_VRAM;
entry->priority = 0;
entry->tv.bo = &vm->page_directory->tbo;
entry->tv.shared = true;
list_add(&entry->tv.head, validated);
}
list = drm_malloc_ab(vm->max_pde_used + 2,
sizeof(struct amdgpu_bo_list_entry));
if (!list) {
return NULL;
}
/**
* amdgpu_vm_get_pt_bos - add the VM page table BOs to a duplicates list
*
* @vm: vm providing the BOs
* @duplicates: head of duplicates list
*
* Add the page tables to the BO duplicates list
* for command submission.
*/
void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates)
{
unsigned i;
/* add the vm page table to the list */
list[0].robj = vm->page_directory;
list[0].prefered_domains = AMDGPU_GEM_DOMAIN_VRAM;
list[0].allowed_domains = AMDGPU_GEM_DOMAIN_VRAM;
list[0].priority = 0;
list[0].tv.bo = &vm->page_directory->tbo;
list[0].tv.shared = true;
list_add(&list[0].tv.head, head);
for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
if (!vm->page_tables[i].bo)
for (i = 0; i <= vm->max_pde_used; ++i) {
struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
if (!entry->robj)
continue;
list[idx].robj = vm->page_tables[i].bo;
list[idx].prefered_domains = AMDGPU_GEM_DOMAIN_VRAM;
list[idx].allowed_domains = AMDGPU_GEM_DOMAIN_VRAM;
list[idx].priority = 0;
list[idx].tv.bo = &list[idx].robj->tbo;
list[idx].tv.shared = true;
list_add(&list[idx++].tv.head, head);
list_add(&entry->tv.head, duplicates);
}
return list;
}
/**
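The old allocate-and-return amdgpu_vm_get_bos() is split into two allocation-free helpers with a fixed call order: the page directory goes on the main validation list before reservation, and the page tables go on the duplicates list after the buffers are reserved — the exact sequence the amdgpu_cs_parser_relocs() and amdgpu_gem_va_update_vm() hunks above now follow. Condensed:
	amdgpu_vm_get_pd_bo(vm, &validated, &vm_pd);	/* PD first */
	r = ttm_eu_reserve_buffers(&ticket, &validated, true, &duplicates);
	if (!r)
		amdgpu_vm_get_pt_bos(vm, &duplicates);	/* PTs after reserve */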
......@@ -461,7 +461,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
/* walk over the address space and update the page directory */
for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
struct amdgpu_bo *bo = vm->page_tables[pt_idx].bo;
struct amdgpu_bo *bo = vm->page_tables[pt_idx].entry.robj;
uint64_t pde, pt;
if (bo == NULL)
......@@ -638,7 +638,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_device *adev,
/* walk over the address space and update the page tables */
for (addr = start; addr < end; ) {
uint64_t pt_idx = addr >> amdgpu_vm_block_size;
struct amdgpu_bo *pt = vm->page_tables[pt_idx].bo;
struct amdgpu_bo *pt = vm->page_tables[pt_idx].entry.robj;
unsigned nptes;
uint64_t pte;
int r;
......@@ -1010,13 +1010,13 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
return -EINVAL;
/* make sure object fit at this offset */
eaddr = saddr + size;
eaddr = saddr + size - 1;
if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo)))
return -EINVAL;
last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
if (last_pfn > adev->vm_manager.max_pfn) {
dev_err(adev->dev, "va above limit (0x%08X > 0x%08X)\n",
if (last_pfn >= adev->vm_manager.max_pfn) {
dev_err(adev->dev, "va above limit (0x%08X >= 0x%08X)\n",
last_pfn, adev->vm_manager.max_pfn);
return -EINVAL;
}
......@@ -1025,7 +1025,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
eaddr /= AMDGPU_GPU_PAGE_SIZE;
spin_lock(&vm->it_lock);
it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1);
it = interval_tree_iter_first(&vm->va, saddr, eaddr);
spin_unlock(&vm->it_lock);
if (it) {
struct amdgpu_bo_va_mapping *tmp;
......@@ -1046,7 +1046,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
INIT_LIST_HEAD(&mapping->list);
mapping->it.start = saddr;
mapping->it.last = eaddr - 1;
mapping->it.last = eaddr;
mapping->offset = offset;
mapping->flags = flags;
......@@ -1070,9 +1070,11 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
/* walk over the address space and allocate the page tables */
for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
struct reservation_object *resv = vm->page_directory->tbo.resv;
struct amdgpu_bo_list_entry *entry;
struct amdgpu_bo *pt;
if (vm->page_tables[pt_idx].bo)
entry = &vm->page_tables[pt_idx].entry;
if (entry->robj)
continue;
r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
......@@ -1094,8 +1096,13 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
goto error_free;
}
entry->robj = pt;
entry->prefered_domains = AMDGPU_GEM_DOMAIN_VRAM;
entry->allowed_domains = AMDGPU_GEM_DOMAIN_VRAM;
entry->priority = 0;
entry->tv.bo = &entry->robj->tbo;
entry->tv.shared = true;
vm->page_tables[pt_idx].addr = 0;
vm->page_tables[pt_idx].bo = pt;
}
return 0;
......@@ -1326,7 +1333,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
}
for (i = 0; i < amdgpu_vm_num_pdes(adev); i++)
amdgpu_bo_unref(&vm->page_tables[i].bo);
amdgpu_bo_unref(&vm->page_tables[i].entry.robj);
kfree(vm->page_tables);
amdgpu_bo_unref(&vm->page_directory);
......
......@@ -243,7 +243,7 @@ static void amdgpu_atombios_dp_get_adjust_train(const u8 link_status[DP_LINK_STA
/* convert bits per color to bits per pixel */
/* get bpc from the EDID */
static int amdgpu_atombios_dp_convert_bpc_to_bpp(int bpc)
static unsigned amdgpu_atombios_dp_convert_bpc_to_bpp(int bpc)
{
if (bpc == 0)
return 24;
......@@ -251,64 +251,32 @@ static int amdgpu_atombios_dp_convert_bpc_to_bpp(int bpc)
return bpc * 3;
}
/* get the max pix clock supported by the link rate and lane num */
static int amdgpu_atombios_dp_get_max_dp_pix_clock(int link_rate,
int lane_num,
int bpp)
{
return (link_rate * lane_num * 8) / bpp;
}
/***** amdgpu specific DP functions *****/
/* First get the min lane# when low rate is used according to pixel clock
* (prefer low rate), second check max lane# supported by DP panel,
* if the max lane# < low rate lane# then use max lane# instead.
*/
static int amdgpu_atombios_dp_get_dp_lane_number(struct drm_connector *connector,
static int amdgpu_atombios_dp_get_dp_link_config(struct drm_connector *connector,
const u8 dpcd[DP_DPCD_SIZE],
int pix_clock)
unsigned pix_clock,
unsigned *dp_lanes, unsigned *dp_rate)
{
int bpp = amdgpu_atombios_dp_convert_bpc_to_bpp(amdgpu_connector_get_monitor_bpc(connector));
int max_link_rate = drm_dp_max_link_rate(dpcd);
int max_lane_num = drm_dp_max_lane_count(dpcd);
int lane_num;
int max_dp_pix_clock;
for (lane_num = 1; lane_num < max_lane_num; lane_num <<= 1) {
max_dp_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(max_link_rate, lane_num, bpp);
if (pix_clock <= max_dp_pix_clock)
break;
unsigned bpp =
amdgpu_atombios_dp_convert_bpc_to_bpp(amdgpu_connector_get_monitor_bpc(connector));
static const unsigned link_rates[3] = { 162000, 270000, 540000 };
unsigned max_link_rate = drm_dp_max_link_rate(dpcd);
unsigned max_lane_num = drm_dp_max_lane_count(dpcd);
unsigned lane_num, i, max_pix_clock;
for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
if (max_pix_clock >= pix_clock) {
*dp_lanes = lane_num;
*dp_rate = link_rates[i];
return 0;
}
}
return lane_num;
}
static int amdgpu_atombios_dp_get_dp_link_clock(struct drm_connector *connector,
const u8 dpcd[DP_DPCD_SIZE],
int pix_clock)
{
int bpp = amdgpu_atombios_dp_convert_bpc_to_bpp(amdgpu_connector_get_monitor_bpc(connector));
int lane_num, max_pix_clock;
if (amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector) ==
ENCODER_OBJECT_ID_NUTMEG)
return 270000;
lane_num = amdgpu_atombios_dp_get_dp_lane_number(connector, dpcd, pix_clock);
max_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(162000, lane_num, bpp);
if (pix_clock <= max_pix_clock)
return 162000;
max_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(270000, lane_num, bpp);
if (pix_clock <= max_pix_clock)
return 270000;
if (amdgpu_connector_is_dp12_capable(connector)) {
max_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(540000, lane_num, bpp);
if (pix_clock <= max_pix_clock)
return 540000;
}
return drm_dp_max_link_rate(dpcd);
return -EINVAL;
}
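To make the search order concrete, a worked pass (sink capabilities hypothetical): pix_clock = 148500 kHz at 8 bpc gives bpp = 24; with max_link_rate = 270000 and max_lane_num = 4 the loop tries:
	/* 1 lane  @ 162000: (1*162000*8)/24 =  54000 kHz  too slow
	 * 1 lane  @ 270000: (1*270000*8)/24 =  90000 kHz  too slow
	 * 2 lanes @ 162000: (2*162000*8)/24 = 108000 kHz  too slow
	 * 2 lanes @ 270000: (2*270000*8)/24 = 180000 kHz  >= 148500 -> selected */
so the function returns dp_lanes = 2 and dp_rate = 270000 — preferring fewer lanes first, then the lowest rate that fits.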
static u8 amdgpu_atombios_dp_encoder_service(struct amdgpu_device *adev,
......@@ -422,6 +390,7 @@ void amdgpu_atombios_dp_set_link_config(struct drm_connector *connector,
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct amdgpu_connector_atom_dig *dig_connector;
int ret;
if (!amdgpu_connector->con_priv)
return;
......@@ -429,10 +398,14 @@ void amdgpu_atombios_dp_set_link_config(struct drm_connector *connector,
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
dig_connector->dp_clock =
amdgpu_atombios_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
dig_connector->dp_lane_count =
amdgpu_atombios_dp_get_dp_lane_number(connector, dig_connector->dpcd, mode->clock);
ret = amdgpu_atombios_dp_get_dp_link_config(connector, dig_connector->dpcd,
mode->clock,
&dig_connector->dp_lane_count,
&dig_connector->dp_clock);
if (ret) {
dig_connector->dp_clock = 0;
dig_connector->dp_lane_count = 0;
}
}
}
......@@ -441,14 +414,17 @@ int amdgpu_atombios_dp_mode_valid_helper(struct drm_connector *connector,
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct amdgpu_connector_atom_dig *dig_connector;
int dp_clock;
unsigned dp_lanes, dp_clock;
int ret;
if (!amdgpu_connector->con_priv)
return MODE_CLOCK_HIGH;
dig_connector = amdgpu_connector->con_priv;
dp_clock =
amdgpu_atombios_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
ret = amdgpu_atombios_dp_get_dp_link_config(connector, dig_connector->dpcd,
mode->clock, &dp_lanes, &dp_clock);
if (ret)
return MODE_CLOCK_HIGH;
if ((dp_clock == 540000) &&
(!amdgpu_connector_is_dp12_capable(connector)))
......
......@@ -32,6 +32,7 @@
#include "amdgpu_vce.h"
#include "cikd.h"
#include "atom.h"
#include "amd_pcie.h"
#include "cik.h"
#include "gmc_v7_0.h"
......@@ -65,6 +66,7 @@
#include "oss/oss_2_0_sh_mask.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_powerplay.h"
/*
* Indirect registers accessor
......@@ -929,6 +931,37 @@ static bool cik_read_disabled_bios(struct amdgpu_device *adev)
return r;
}
static bool cik_read_bios_from_rom(struct amdgpu_device *adev,
u8 *bios, u32 length_bytes)
{
u32 *dw_ptr;
unsigned long flags;
u32 i, length_dw;
if (bios == NULL)
return false;
if (length_bytes == 0)
return false;
/* APU vbios image is part of sbios image */
if (adev->flags & AMD_IS_APU)
return false;
dw_ptr = (u32 *)bios;
length_dw = ALIGN(length_bytes, 4) / 4;
/* take the smc lock since we are using the smc index */
spin_lock_irqsave(&adev->smc_idx_lock, flags);
/* set rom index to 0 */
WREG32(mmSMC_IND_INDEX_0, ixROM_INDEX);
WREG32(mmSMC_IND_DATA_0, 0);
/* set index to data for continuous read */
WREG32(mmSMC_IND_INDEX_0, ixROM_DATA);
for (i = 0; i < length_dw; i++)
dw_ptr[i] = RREG32(mmSMC_IND_DATA_0);
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return true;
}
static struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = {
{mmGRBM_STATUS, false},
{mmGB_ADDR_CONFIG, false},
......@@ -1563,8 +1596,8 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
{
struct pci_dev *root = adev->pdev->bus->self;
int bridge_pos, gpu_pos;
u32 speed_cntl, mask, current_data_rate;
int ret, i;
u32 speed_cntl, current_data_rate;
int i;
u16 tmp16;
if (pci_is_root_bus(adev->pdev->bus))
......@@ -1576,23 +1609,20 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
if (adev->flags & AMD_IS_APU)
return;
ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
if (ret != 0)
return;
if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
return;
speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL);
current_data_rate = (speed_cntl & PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK) >>
PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
if (mask & DRM_PCIE_SPEED_80) {
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
if (current_data_rate == 2) {
DRM_INFO("PCIE gen 3 link speeds already enabled\n");
return;
}
DRM_INFO("enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n");
} else if (mask & DRM_PCIE_SPEED_50) {
} else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) {
if (current_data_rate == 1) {
DRM_INFO("PCIE gen 2 link speeds already enabled\n");
return;
......@@ -1608,7 +1638,7 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
if (!gpu_pos)
return;
if (mask & DRM_PCIE_SPEED_80) {
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
/* re-try equalization if gen3 is not already enabled */
if (current_data_rate != 2) {
u16 bridge_cfg, gpu_cfg;
......@@ -1703,9 +1733,9 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
tmp16 &= ~0xf;
if (mask & DRM_PCIE_SPEED_80)
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
tmp16 |= 3; /* gen3 */
else if (mask & DRM_PCIE_SPEED_50)
else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
tmp16 |= 2; /* gen2 */
else
tmp16 |= 1; /* gen1 */
......@@ -1922,7 +1952,7 @@ static const struct amdgpu_ip_block_version bonaire_ip_blocks[] =
.major = 7,
.minor = 0,
.rev = 0,
.funcs = &ci_dpm_ip_funcs,
.funcs = &amdgpu_pp_ip_funcs,
},
{
.type = AMD_IP_BLOCK_TYPE_DCE,
......@@ -1990,7 +2020,7 @@ static const struct amdgpu_ip_block_version hawaii_ip_blocks[] =
.major = 7,
.minor = 0,
.rev = 0,
.funcs = &ci_dpm_ip_funcs,
.funcs = &amdgpu_pp_ip_funcs,
},
{
.type = AMD_IP_BLOCK_TYPE_DCE,
......@@ -2058,7 +2088,7 @@ static const struct amdgpu_ip_block_version kabini_ip_blocks[] =
.major = 7,
.minor = 0,
.rev = 0,
.funcs = &kv_dpm_ip_funcs,
.funcs = &amdgpu_pp_ip_funcs,
},
{
.type = AMD_IP_BLOCK_TYPE_DCE,
......@@ -2126,7 +2156,7 @@ static const struct amdgpu_ip_block_version mullins_ip_blocks[] =
.major = 7,
.minor = 0,
.rev = 0,
.funcs = &kv_dpm_ip_funcs,
.funcs = &amdgpu_pp_ip_funcs,
},
{
.type = AMD_IP_BLOCK_TYPE_DCE,
......@@ -2194,7 +2224,7 @@ static const struct amdgpu_ip_block_version kaveri_ip_blocks[] =
.major = 7,
.minor = 0,
.rev = 0,
.funcs = &kv_dpm_ip_funcs,
.funcs = &amdgpu_pp_ip_funcs,
},
{
.type = AMD_IP_BLOCK_TYPE_DCE,
......@@ -2267,6 +2297,7 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
static const struct amdgpu_asic_funcs cik_asic_funcs =
{
.read_disabled_bios = &cik_read_disabled_bios,
.read_bios_from_rom = &cik_read_bios_from_rom,
.read_register = &cik_read_register,
.reset = &cik_asic_reset,
.set_vga_state = &cik_vga_set_state,
......@@ -2417,6 +2448,8 @@ static int cik_common_early_init(void *handle)
return -EINVAL;
}
amdgpu_get_pcie_info(adev);
return 0;
}
......
......@@ -24,7 +24,7 @@
#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "fiji_smumgr.h"
#include "fiji_smum.h"
MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
......
/*
* Copyright 2014 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef FIJI_PP_SMC_H
#define FIJI_PP_SMC_H
#pragma pack(push, 1)
#define PPSMC_SWSTATE_FLAG_DC 0x01
#define PPSMC_SWSTATE_FLAG_UVD 0x02
#define PPSMC_SWSTATE_FLAG_VCE 0x04
#define PPSMC_THERMAL_PROTECT_TYPE_INTERNAL 0x00
#define PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL 0x01
#define PPSMC_THERMAL_PROTECT_TYPE_NONE 0xff
#define PPSMC_SYSTEMFLAG_GPIO_DC 0x01
#define PPSMC_SYSTEMFLAG_STEPVDDC 0x02
#define PPSMC_SYSTEMFLAG_GDDR5 0x04
#define PPSMC_SYSTEMFLAG_DISABLE_BABYSTEP 0x08
#define PPSMC_SYSTEMFLAG_REGULATOR_HOT 0x10
#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_ANALOG 0x20
#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_MASK 0x07
#define PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK 0x08
#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTODPMLOWSTATE 0x00
#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE 0x01
#define PPSMC_DPM2FLAGS_TDPCLMP 0x01
#define PPSMC_DPM2FLAGS_PWRSHFT 0x02
#define PPSMC_DPM2FLAGS_OCP 0x04
#define PPSMC_DISPLAY_WATERMARK_LOW 0
#define PPSMC_DISPLAY_WATERMARK_HIGH 1
#define PPSMC_STATEFLAG_AUTO_PULSE_SKIP 0x01
#define PPSMC_STATEFLAG_POWERBOOST 0x02
#define PPSMC_STATEFLAG_PSKIP_ON_TDP_FAULT 0x04
#define PPSMC_STATEFLAG_POWERSHIFT 0x08
#define PPSMC_STATEFLAG_SLOW_READ_MARGIN 0x10
#define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE 0x20
#define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS 0x40
#define FDO_MODE_HARDWARE 0
#define FDO_MODE_PIECE_WISE_LINEAR 1
enum FAN_CONTROL {
FAN_CONTROL_FUZZY,
FAN_CONTROL_TABLE
};
//Gemini Modes
#define PPSMC_GeminiModeNone 0 //Single GPU board
#define PPSMC_GeminiModeMaster 1 //Master GPU on a Gemini board
#define PPSMC_GeminiModeSlave 2 //Slave GPU on a Gemini board
#define PPSMC_Result_OK ((uint16_t)0x01)
#define PPSMC_Result_NoMore ((uint16_t)0x02)
#define PPSMC_Result_NotNow ((uint16_t)0x03)
#define PPSMC_Result_Failed ((uint16_t)0xFF)
#define PPSMC_Result_UnknownCmd ((uint16_t)0xFE)
#define PPSMC_Result_UnknownVT ((uint16_t)0xFD)
typedef uint16_t PPSMC_Result;
#define PPSMC_isERROR(x) ((uint16_t)0x80 & (x))
#define PPSMC_MSG_Halt ((uint16_t)0x10)
#define PPSMC_MSG_Resume ((uint16_t)0x11)
#define PPSMC_MSG_EnableDPMLevel ((uint16_t)0x12)
#define PPSMC_MSG_ZeroLevelsDisabled ((uint16_t)0x13)
#define PPSMC_MSG_OneLevelsDisabled ((uint16_t)0x14)
#define PPSMC_MSG_TwoLevelsDisabled ((uint16_t)0x15)
#define PPSMC_MSG_EnableThermalInterrupt ((uint16_t)0x16)
#define PPSMC_MSG_RunningOnAC ((uint16_t)0x17)
#define PPSMC_MSG_LevelUp ((uint16_t)0x18)
#define PPSMC_MSG_LevelDown ((uint16_t)0x19)
#define PPSMC_MSG_ResetDPMCounters ((uint16_t)0x1a)
#define PPSMC_MSG_SwitchToSwState ((uint16_t)0x20)
#define PPSMC_MSG_SwitchToSwStateLast ((uint16_t)0x3f)
#define PPSMC_MSG_SwitchToInitialState ((uint16_t)0x40)
#define PPSMC_MSG_NoForcedLevel ((uint16_t)0x41)
#define PPSMC_MSG_ForceHigh ((uint16_t)0x42)
#define PPSMC_MSG_ForceMediumOrHigh ((uint16_t)0x43)
#define PPSMC_MSG_SwitchToMinimumPower ((uint16_t)0x51)
#define PPSMC_MSG_ResumeFromMinimumPower ((uint16_t)0x52)
#define PPSMC_MSG_EnableCac ((uint16_t)0x53)
#define PPSMC_MSG_DisableCac ((uint16_t)0x54)
#define PPSMC_DPMStateHistoryStart ((uint16_t)0x55)
#define PPSMC_DPMStateHistoryStop ((uint16_t)0x56)
#define PPSMC_CACHistoryStart ((uint16_t)0x57)
#define PPSMC_CACHistoryStop ((uint16_t)0x58)
#define PPSMC_TDPClampingActive ((uint16_t)0x59)
#define PPSMC_TDPClampingInactive ((uint16_t)0x5A)
#define PPSMC_StartFanControl ((uint16_t)0x5B)
#define PPSMC_StopFanControl ((uint16_t)0x5C)
#define PPSMC_NoDisplay ((uint16_t)0x5D)
#define PPSMC_HasDisplay ((uint16_t)0x5E)
#define PPSMC_MSG_UVDPowerOFF ((uint16_t)0x60)
#define PPSMC_MSG_UVDPowerON ((uint16_t)0x61)
#define PPSMC_MSG_EnableULV ((uint16_t)0x62)
#define PPSMC_MSG_DisableULV ((uint16_t)0x63)
#define PPSMC_MSG_EnterULV ((uint16_t)0x64)
#define PPSMC_MSG_ExitULV ((uint16_t)0x65)
#define PPSMC_PowerShiftActive ((uint16_t)0x6A)
#define PPSMC_PowerShiftInactive ((uint16_t)0x6B)
#define PPSMC_OCPActive ((uint16_t)0x6C)
#define PPSMC_OCPInactive ((uint16_t)0x6D)
#define PPSMC_CACLongTermAvgEnable ((uint16_t)0x6E)
#define PPSMC_CACLongTermAvgDisable ((uint16_t)0x6F)
#define PPSMC_MSG_InferredStateSweep_Start ((uint16_t)0x70)
#define PPSMC_MSG_InferredStateSweep_Stop ((uint16_t)0x71)
#define PPSMC_MSG_SwitchToLowestInfState ((uint16_t)0x72)
#define PPSMC_MSG_SwitchToNonInfState ((uint16_t)0x73)
#define PPSMC_MSG_AllStateSweep_Start ((uint16_t)0x74)
#define PPSMC_MSG_AllStateSweep_Stop ((uint16_t)0x75)
#define PPSMC_MSG_SwitchNextLowerInfState ((uint16_t)0x76)
#define PPSMC_MSG_SwitchNextHigherInfState ((uint16_t)0x77)
#define PPSMC_MSG_MclkRetrainingTest ((uint16_t)0x78)
#define PPSMC_MSG_ForceTDPClamping ((uint16_t)0x79)
#define PPSMC_MSG_CollectCAC_PowerCorreln ((uint16_t)0x7A)
#define PPSMC_MSG_CollectCAC_WeightCalib ((uint16_t)0x7B)
#define PPSMC_MSG_CollectCAC_SQonly ((uint16_t)0x7C)
#define PPSMC_MSG_CollectCAC_TemperaturePwr ((uint16_t)0x7D)
#define PPSMC_MSG_ExtremitiesTest_Start ((uint16_t)0x7E)
#define PPSMC_MSG_ExtremitiesTest_Stop ((uint16_t)0x7F)
#define PPSMC_FlushDataCache ((uint16_t)0x80)
#define PPSMC_FlushInstrCache ((uint16_t)0x81)
#define PPSMC_MSG_SetEnabledLevels ((uint16_t)0x82)
#define PPSMC_MSG_SetForcedLevels ((uint16_t)0x83)
#define PPSMC_MSG_ResetToDefaults ((uint16_t)0x84)
#define PPSMC_MSG_SetForcedLevelsAndJump ((uint16_t)0x85)
#define PPSMC_MSG_SetCACHistoryMode ((uint16_t)0x86)
#define PPSMC_MSG_EnableDTE ((uint16_t)0x87)
#define PPSMC_MSG_DisableDTE ((uint16_t)0x88)
#define PPSMC_MSG_SmcSpaceSetAddress ((uint16_t)0x89)
#define PPSMC_MSG_SmcSpaceWriteDWordInc ((uint16_t)0x8A)
#define PPSMC_MSG_SmcSpaceWriteWordInc ((uint16_t)0x8B)
#define PPSMC_MSG_SmcSpaceWriteByteInc ((uint16_t)0x8C)
#define PPSMC_MSG_BREAK ((uint16_t)0xF8)
#define PPSMC_MSG_Test ((uint16_t)0x100)
#define PPSMC_MSG_DRV_DRAM_ADDR_HI ((uint16_t)0x250)
#define PPSMC_MSG_DRV_DRAM_ADDR_LO ((uint16_t)0x251)
#define PPSMC_MSG_SMU_DRAM_ADDR_HI ((uint16_t)0x252)
#define PPSMC_MSG_SMU_DRAM_ADDR_LO ((uint16_t)0x253)
#define PPSMC_MSG_LoadUcodes ((uint16_t)0x254)
typedef uint16_t PPSMC_Msg;
#define PPSMC_EVENT_STATUS_THERMAL 0x00000001
#define PPSMC_EVENT_STATUS_REGULATORHOT 0x00000002
#define PPSMC_EVENT_STATUS_DC 0x00000004
#define PPSMC_EVENT_STATUS_GPIO17 0x00000008
#pragma pack(pop)
#endif
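
The PPSMC_Result codes above set bit 7 for failures, which is all PPSMC_isERROR() tests. A toy usage sketch, with a hypothetical send_msg_to_smc() standing in for the smumgr's real mailbox path; only the constants and the macro come from this header:

#include <stdint.h>
#include <stdio.h>

#define PPSMC_Result_OK  ((uint16_t)0x01)
#define PPSMC_isERROR(x) ((uint16_t)0x80 & (x))
#define PPSMC_MSG_Halt   ((uint16_t)0x10)

typedef uint16_t PPSMC_Result;
typedef uint16_t PPSMC_Msg;

/* Hypothetical stand-in; the real path writes a message register
 * on the SMC and polls for a result code. */
static PPSMC_Result send_msg_to_smc(PPSMC_Msg msg)
{
	(void)msg;
	return PPSMC_Result_OK;
}

int main(void)
{
	PPSMC_Result res = send_msg_to_smc(PPSMC_MSG_Halt);

	if (PPSMC_isERROR(res))
		fprintf(stderr, "SMC rejected message: 0x%02x\n", res);
	return 0;
}
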
......@@ -25,7 +25,7 @@
#include "drmP.h"
#include "amdgpu.h"
#include "fiji_ppsmc.h"
#include "fiji_smumgr.h"
#include "fiji_smum.h"
#include "smu_ucode_xfer_vi.h"
#include "amdgpu_ucode.h"
......
......@@ -596,6 +596,7 @@
#define mmSWRST_EP_CONTROL_0 0x14ac
#define mmCPM_CONTROL 0x14b8
#define mmGSKT_CONTROL 0x14bf
#define ixSWRST_COMMAND_1 0x1400103
#define ixLM_CONTROL 0x1400120
#define ixLM_PCIETXMUX0 0x1400121
#define ixLM_PCIETXMUX1 0x1400122
......
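
Unlike the mm* offsets above, ix* offsets such as the new ixSWRST_COMMAND_1 sit behind an index/data register pair rather than in the directly mapped register space. A sketch of the usual indirect access pattern, with hypothetical index/data port offsets (the driver wraps this in per-block RREG32/WREG32-style helpers):

#include <stdint.h>

static volatile uint32_t *mmio; /* BAR mapping, set up elsewhere */

#define mmINDEX_SKETCH 0x0 /* hypothetical index-port offset */
#define mmDATA_SKETCH  0x1 /* hypothetical data-port offset */

static uint32_t read_indirect(uint32_t ix_offset)
{
	mmio[mmINDEX_SKETCH] = ix_offset; /* select e.g. ixSWRST_COMMAND_1 */
	return mmio[mmDATA_SKETCH];       /* value comes back via the data port */
}
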