Commit 754270c7 authored by Dave Airlie

Merge branch 'drm-next-4.15' of git://people.freedesktop.org/~agd5f/linux into drm-next

First feature pull for 4.15. Highlights:
- Per-VM BO support
- Lots of powerplay cleanups
- Powerplay support for CI
- PASID manager for KFD
- Interrupt infrastructure for recoverable page faults
- SR-IOV fixes
- Initial GPU reset support for Vega10
- PRIME mmap support
- TTM page table debugging improvements
- Lots of bug fixes

* 'drm-next-4.15' of git://people.freedesktop.org/~agd5f/linux: (232 commits)
  drm/amdgpu: clarify license in amdgpu_trace_points.c
  drm/amdgpu: Add gem_prime_mmap support
  drm/amd/powerplay: delete dead code in smumgr
  drm/amd/powerplay: delete SMUM_FIELD_MASK
  drm/amd/powerplay: delete SMUM_WAIT_INDIRECT_FIELD
  drm/amd/powerplay: delete SMUM_READ_FIELD
  drm/amd/powerplay: delete SMUM_SET_FIELD
  drm/amd/powerplay: delete SMUM_READ_VFPF_INDIRECT_FIELD
  drm/amd/powerplay: delete SMUM_WRITE_VFPF_INDIRECT_FIELD
  drm/amd/powerplay: delete SMUM_WRITE_FIELD
  drm/amd/powerplay: delete SMU_WRITE_INDIRECT_FIELD
  drm/amd/powerplay: move macros to hwmgr.h
  drm/amd/powerplay: move PHM_WAIT_VFPF_INDIRECT_FIELD to hwmgr.h
  drm/amd/powerplay: move SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL to hwmgr.h
  drm/amd/powerplay: move SMUM_WAIT_INDIRECT_FIELD_UNEQUAL to hwmgr.h
  drm/amd/powerplay: add new helper functions in hwmgr.h
  drm/amd/powerplay: use SMU_IND_INDEX/DATA_11 pair
  drm/amd/powerplay: refine powerplay code.
  drm/amd/powerplay: delete dead code in hwmgr.h
  drm/amd/powerplay: refine interface in struct pp_smumgr_func
  ...
parents 9afafdbf 6f87a895
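One highlight worth calling out before the diff is the PRIME mmap support: the new amdgpu_gem_prime_mmap callback (wired up as .gem_prime_mmap in amdgpu_drv.c further down) lets a dma-buf exported from an amdgpu GEM object be mapped with a plain mmap() on the dma-buf fd. As a rough userspace sketch only — the open render node, GEM handle and buffer size are assumed to come from an earlier allocation and are not part of this commit — the flow looks roughly like this:

/* Sketch: map an amdgpu BO through the PRIME/dma-buf path.
 * drm_fd, gem_handle and size are placeholder assumptions.
 */
#include <stdint.h>
#include <stddef.h>
#include <unistd.h>
#include <sys/mman.h>
#include <xf86drm.h>	/* libdrm: drmPrimeHandleToFD() */

static void *map_bo(int drm_fd, uint32_t gem_handle, size_t size)
{
	int prime_fd;
	void *ptr;

	/* Export the GEM handle as a dma-buf file descriptor. */
	if (drmPrimeHandleToFD(drm_fd, gem_handle,
			       DRM_CLOEXEC | DRM_RDWR, &prime_fd))
		return NULL;

	/* With .gem_prime_mmap implemented, the dma-buf fd can be mapped. */
	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   prime_fd, 0);
	close(prime_fd);
	return ptr == MAP_FAILED ? NULL : ptr;
}

On kernels without this series the mmap() on the dma-buf fd simply fails, so callers should treat the mapping as optional.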
@@ -184,6 +184,7 @@ config DRM_AMDGPU
select BACKLIGHT_CLASS_DEVICE
select BACKLIGHT_LCD_SUPPORT
select INTERVAL_TREE
+select CHASH
help
Choose this option if you have a recent AMD Radeon graphics card.
@@ -191,6 +192,8 @@ config DRM_AMDGPU
source "drivers/gpu/drm/amd/amdgpu/Kconfig"
+source "drivers/gpu/drm/amd/lib/Kconfig"
source "drivers/gpu/drm/nouveau/Kconfig"
source "drivers/gpu/drm/i915/Kconfig"
...
@@ -50,6 +50,7 @@ obj-$(CONFIG_DRM_ARM) += arm/
obj-$(CONFIG_DRM_TTM) += ttm/
obj-$(CONFIG_DRM_TDFX) += tdfx/
obj-$(CONFIG_DRM_R128) += r128/
+obj-y += amd/lib/
obj-$(CONFIG_HSA_AMD) += amd/amdkfd/
obj-$(CONFIG_DRM_RADEON)+= radeon/
obj-$(CONFIG_DRM_AMDGPU)+= amd/amdgpu/
...
@@ -133,5 +133,3 @@ include $(FULL_AMD_PATH)/powerplay/Makefile
amdgpu-y += $(AMD_POWERPLAY_FILES)
obj-$(CONFIG_DRM_AMDGPU)+= amdgpu.o
-CFLAGS_amdgpu_trace_points.o := -I$(src)
@@ -65,6 +65,7 @@
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_vcn.h"
+#include "amdgpu_mn.h"
#include "gpu_scheduler.h"
#include "amdgpu_virt.h"
@@ -91,7 +92,7 @@ extern int amdgpu_dpm;
extern int amdgpu_fw_load_type;
extern int amdgpu_aspm;
extern int amdgpu_runtime_pm;
-extern unsigned amdgpu_ip_block_mask;
+extern uint amdgpu_ip_block_mask;
extern int amdgpu_bapm;
extern int amdgpu_deep_color;
extern int amdgpu_vm_size;
@@ -104,14 +105,14 @@ extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
extern int amdgpu_no_evict;
extern int amdgpu_direct_gma_size;
-extern unsigned amdgpu_pcie_gen_cap;
-extern unsigned amdgpu_pcie_lane_cap;
-extern unsigned amdgpu_cg_mask;
-extern unsigned amdgpu_pg_mask;
-extern unsigned amdgpu_sdma_phase_quantum;
+extern uint amdgpu_pcie_gen_cap;
+extern uint amdgpu_pcie_lane_cap;
+extern uint amdgpu_cg_mask;
+extern uint amdgpu_pg_mask;
+extern uint amdgpu_sdma_phase_quantum;
extern char *amdgpu_disable_cu;
extern char *amdgpu_virtual_display;
-extern unsigned amdgpu_pp_feature_mask;
+extern uint amdgpu_pp_feature_mask;
extern int amdgpu_vram_page_split;
extern int amdgpu_ngg;
extern int amdgpu_prim_buf_per_se;
@@ -178,6 +179,7 @@ struct amdgpu_cs_parser;
struct amdgpu_job;
struct amdgpu_irq_src;
struct amdgpu_fpriv;
+struct amdgpu_bo_va_mapping;
enum amdgpu_cp_irq {
AMDGPU_CP_IRQ_GFX_EOP = 0,
@@ -292,14 +294,25 @@ struct amdgpu_buffer_funcs {
/* provided by hw blocks that can write ptes, e.g., sdma */
struct amdgpu_vm_pte_funcs {
+/* number of dw to reserve per operation */
+unsigned copy_pte_num_dw;
/* copy pte entries from GART */
void (*copy_pte)(struct amdgpu_ib *ib,
uint64_t pe, uint64_t src,
unsigned count);
/* write pte one entry at a time with addr mapping */
void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
uint64_t value, unsigned count,
uint32_t incr);
+/* maximum nums of PTEs/PDEs in a single operation */
+uint32_t set_max_nums_pte_pde;
+/* number of dw to reserve per operation */
+unsigned set_pte_pde_num_dw;
/* for linear pte/pde updates without addr mapping */
void (*set_pte_pde)(struct amdgpu_ib *ib,
uint64_t pe,
@@ -332,6 +345,7 @@ struct amdgpu_gart_funcs {
struct amdgpu_ih_funcs {
/* ring read/write ptr handling, called from interrupt context */
u32 (*get_wptr)(struct amdgpu_device *adev);
+bool (*prescreen_iv)(struct amdgpu_device *adev);
void (*decode_iv)(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry);
void (*set_rptr)(struct amdgpu_device *adev);
@@ -399,6 +413,7 @@ void amdgpu_gem_prime_unpin(struct drm_gem_object *obj);
struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *);
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
int amdgpu_gem_debugfs_init(struct amdgpu_device *adev);
/* sub-allocation manager, it has to be protected by another lock.
@@ -455,9 +470,10 @@ struct amdgpu_sa_bo {
*/
void amdgpu_gem_force_release(struct amdgpu_device *adev);
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
int alignment, u32 initial_domain,
u64 flags, bool kernel,
-struct drm_gem_object **obj);
+struct reservation_object *resv,
+struct drm_gem_object **obj);
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
@@ -731,8 +747,8 @@ struct amdgpu_ctx_mgr {
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
-uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
-struct dma_fence *fence);
+int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
+struct dma_fence *fence, uint64_t *seq);
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
struct amdgpu_ring *ring, uint64_t seq);
@@ -1014,7 +1030,6 @@ struct amdgpu_gfx {
/* reset mask */
uint32_t grbm_soft_reset;
uint32_t srbm_soft_reset;
-bool in_reset;
/* s3/s4 mask */
bool in_suspend;
/* NGG */
@@ -1056,6 +1071,7 @@ struct amdgpu_cs_parser {
/* buffer objects */
struct ww_acquire_ctx ticket;
struct amdgpu_bo_list *bo_list;
+struct amdgpu_mn *mn;
struct amdgpu_bo_list_entry vm_pd;
struct list_head validated;
struct dma_fence *fence;
@@ -1183,6 +1199,9 @@ struct amdgpu_firmware {
/* gpu info firmware data pointer */
const struct firmware *gpu_info_fw;
+void *fw_buf_ptr;
+uint64_t fw_buf_mc;
};
/*
@@ -1196,20 +1215,6 @@ void amdgpu_benchmark(struct amdgpu_device *adev, int test_number);
*/
void amdgpu_test_moves(struct amdgpu_device *adev);
-/*
-* MMU Notifier
-*/
-#if defined(CONFIG_MMU_NOTIFIER)
-int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr);
-void amdgpu_mn_unregister(struct amdgpu_bo *bo);
-#else
-static inline int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
-{
-return -ENODEV;
-}
-static inline void amdgpu_mn_unregister(struct amdgpu_bo *bo) {}
-#endif
/*
* Debugfs
*/
@@ -1592,6 +1597,7 @@ struct amdgpu_device {
/* record last mm index being written through WREG32*/
unsigned long last_mm_index;
+bool in_sriov_reset;
};
static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
@@ -1759,6 +1765,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
#define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
+#define amdgpu_ih_prescreen_iv(adev) (adev)->irq.ih_funcs->prescreen_iv((adev))
#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
#define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc))
@@ -1791,18 +1798,6 @@ void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
u64 num_vis_bytes);
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
-int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
-int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
-uint32_t flags);
-bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
-struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm);
-bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
-unsigned long end);
-bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
-int *last_invalidated);
-bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
-uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
-struct ttm_mem_reg *mem);
void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
@@ -1885,10 +1880,9 @@ static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
#endif
-struct amdgpu_bo_va_mapping *
-amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
-uint64_t addr, struct amdgpu_bo **bo);
-int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser);
+int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
+uint64_t addr, struct amdgpu_bo **bo,
+struct amdgpu_bo_va_mapping **mapping);
#include "amdgpu_object.h"
#endif
@@ -169,6 +169,8 @@ static const struct kfd2kgd_calls kfd2kgd = {
.get_vmem_size = get_vmem_size,
.get_gpu_clock_counter = get_gpu_clock_counter,
.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
+.alloc_pasid = amdgpu_vm_alloc_pasid,
+.free_pasid = amdgpu_vm_free_pasid,
.program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
.init_pipeline = kgd_init_pipeline,
...
@@ -128,6 +128,8 @@ static const struct kfd2kgd_calls kfd2kgd = {
.get_vmem_size = get_vmem_size,
.get_gpu_clock_counter = get_gpu_clock_counter,
.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
+.alloc_pasid = amdgpu_vm_alloc_pasid,
+.free_pasid = amdgpu_vm_free_pasid,
.program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
.init_pipeline = kgd_init_pipeline,
...
@@ -45,7 +45,6 @@ struct amdgpu_cgs_device {
static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
enum cgs_gpu_mem_type type,
uint64_t size, uint64_t align,
-uint64_t min_offset, uint64_t max_offset,
cgs_handle_t *handle)
{
CGS_FUNC_ADEV;
@@ -53,13 +52,6 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
int ret = 0;
uint32_t domain = 0;
struct amdgpu_bo *obj;
-struct ttm_placement placement;
-struct ttm_place place;
-if (min_offset > max_offset) {
-BUG_ON(1);
-return -EINVAL;
-}
/* fail if the alignment is not a power of 2 */
if (((align != 1) && (align & (align - 1)))
@@ -73,41 +65,19 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
domain = AMDGPU_GEM_DOMAIN_VRAM;
-if (max_offset > adev->mc.real_vram_size)
-return -EINVAL;
-place.fpfn = min_offset >> PAGE_SHIFT;
-place.lpfn = max_offset >> PAGE_SHIFT;
-place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
-TTM_PL_FLAG_VRAM;
break;
case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
domain = AMDGPU_GEM_DOMAIN_VRAM;
-if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
-place.fpfn =
-max(min_offset, adev->mc.visible_vram_size) >> PAGE_SHIFT;
-place.lpfn =
-min(max_offset, adev->mc.real_vram_size) >> PAGE_SHIFT;
-place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
-TTM_PL_FLAG_VRAM;
-}
break;
case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
domain = AMDGPU_GEM_DOMAIN_GTT;
-place.fpfn = min_offset >> PAGE_SHIFT;
-place.lpfn = max_offset >> PAGE_SHIFT;
-place.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
break;
case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
domain = AMDGPU_GEM_DOMAIN_GTT;
-place.fpfn = min_offset >> PAGE_SHIFT;
-place.lpfn = max_offset >> PAGE_SHIFT;
-place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
-TTM_PL_FLAG_UNCACHED;
break;
default:
return -EINVAL;
@@ -116,15 +86,8 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
*handle = 0;
-placement.placement = &place;
-placement.num_placement = 1;
-placement.busy_placement = &place;
-placement.num_busy_placement = 1;
-ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE,
-true, domain, flags,
-NULL, &placement, NULL,
-0, &obj);
+ret = amdgpu_bo_create(adev, size, align, true, domain, flags,
+NULL, NULL, 0, &obj);
if (ret) {
DRM_ERROR("(%d) bo create failed\n", ret);
return ret;
@@ -155,19 +118,14 @@ static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t h
uint64_t *mcaddr)
{
int r;
-u64 min_offset, max_offset;
struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
WARN_ON_ONCE(obj->placement.num_placement > 1);
-min_offset = obj->placements[0].fpfn << PAGE_SHIFT;
-max_offset = obj->placements[0].lpfn << PAGE_SHIFT;
r = amdgpu_bo_reserve(obj, true);
if (unlikely(r != 0))
return r;
-r = amdgpu_bo_pin_restricted(obj, obj->preferred_domains,
-min_offset, max_offset, mcaddr);
+r = amdgpu_bo_pin(obj, obj->preferred_domains, mcaddr);
amdgpu_bo_unreserve(obj);
return r;
}
@@ -675,6 +633,85 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
if (!adev->pm.fw) {
switch (adev->asic_type) {
+case CHIP_TAHITI:
+strcpy(fw_name, "radeon/tahiti_smc.bin");
+break;
+case CHIP_PITCAIRN:
+if ((adev->pdev->revision == 0x81) &&
+((adev->pdev->device == 0x6810) ||
+(adev->pdev->device == 0x6811))) {
+info->is_kicker = true;
+strcpy(fw_name, "radeon/pitcairn_k_smc.bin");
+} else {
+strcpy(fw_name, "radeon/pitcairn_smc.bin");
+}
+break;
+case CHIP_VERDE:
+if (((adev->pdev->device == 0x6820) &&
+((adev->pdev->revision == 0x81) ||
+(adev->pdev->revision == 0x83))) ||
+((adev->pdev->device == 0x6821) &&
+((adev->pdev->revision == 0x83) ||
+(adev->pdev->revision == 0x87))) ||
+((adev->pdev->revision == 0x87) &&
+((adev->pdev->device == 0x6823) ||
+(adev->pdev->device == 0x682b)))) {
+info->is_kicker = true;
+strcpy(fw_name, "radeon/verde_k_smc.bin");
+} else {
+strcpy(fw_name, "radeon/verde_smc.bin");
+}
+break;
+case CHIP_OLAND:
+if (((adev->pdev->revision == 0x81) &&
+((adev->pdev->device == 0x6600) ||
+(adev->pdev->device == 0x6604) ||
+(adev->pdev->device == 0x6605) ||
+(adev->pdev->device == 0x6610))) ||
+((adev->pdev->revision == 0x83) &&
+(adev->pdev->device == 0x6610))) {
+info->is_kicker = true;
+strcpy(fw_name, "radeon/oland_k_smc.bin");
+} else {
+strcpy(fw_name, "radeon/oland_smc.bin");
+}
+break;
+case CHIP_HAINAN:
+if (((adev->pdev->revision == 0x81) &&
+(adev->pdev->device == 0x6660)) ||
+((adev->pdev->revision == 0x83) &&
+((adev->pdev->device == 0x6660) ||
+(adev->pdev->device == 0x6663) ||
+(adev->pdev->device == 0x6665) ||
+(adev->pdev->device == 0x6667)))) {
+info->is_kicker = true;
+strcpy(fw_name, "radeon/hainan_k_smc.bin");
+} else if ((adev->pdev->revision == 0xc3) &&
+(adev->pdev->device == 0x6665)) {
+info->is_kicker = true;
+strcpy(fw_name, "radeon/banks_k_2_smc.bin");
+} else {
+strcpy(fw_name, "radeon/hainan_smc.bin");
+}
+break;
+case CHIP_BONAIRE:
+if ((adev->pdev->revision == 0x80) ||
+(adev->pdev->revision == 0x81) ||
+(adev->pdev->device == 0x665f)) {
+info->is_kicker = true;
+strcpy(fw_name, "radeon/bonaire_k_smc.bin");
+} else {
+strcpy(fw_name, "radeon/bonaire_smc.bin");
+}
+break;
+case CHIP_HAWAII:
+if (adev->pdev->revision == 0x80) {
+info->is_kicker = true;
+strcpy(fw_name, "radeon/hawaii_k_smc.bin");
+} else {
+strcpy(fw_name, "radeon/hawaii_smc.bin");
+}
+break;
case CHIP_TOPAZ:
if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) ||
((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) ||
@@ -838,6 +875,9 @@ static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
case CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID:
sys_info->value = adev->pdev->subsystem_vendor;
break;
+case CGS_SYSTEM_INFO_PCIE_BUS_DEVFN:
+sys_info->value = adev->pdev->devfn;
+break;
default:
return -ENODEV;
}
...
@@ -346,10 +346,8 @@ static void amdgpu_connector_free_edid(struct drm_connector *connector)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
-if (amdgpu_connector->edid) {
-kfree(amdgpu_connector->edid);
-amdgpu_connector->edid = NULL;
-}
+kfree(amdgpu_connector->edid);
+amdgpu_connector->edid = NULL;
}
static int amdgpu_connector_ddc_get_modes(struct drm_connector *connector)
...
@@ -473,11 +473,16 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
return -EPERM;
/* Check if we have user pages and nobody bound the BO already */
-if (lobj->user_pages && bo->tbo.ttm->state != tt_bound) {
-size_t size = sizeof(struct page *);
-size *= bo->tbo.ttm->num_pages;
-memcpy(bo->tbo.ttm->pages, lobj->user_pages, size);
+if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
+lobj->user_pages) {
+amdgpu_ttm_placement_from_domain(bo,
+AMDGPU_GEM_DOMAIN_CPU);
+r = ttm_bo_validate(&bo->tbo, &bo->placement, true,
+false);
+if (r)
+return r;
+amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
+lobj->user_pages);
binding_userptr = true;
}
@@ -502,7 +507,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct amdgpu_bo_list_entry *e;
struct list_head duplicates;
-bool need_mmap_lock = false;
unsigned i, tries = 10;
int r;
@@ -510,9 +514,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
if (p->bo_list) {
-need_mmap_lock = p->bo_list->first_userptr !=
-p->bo_list->num_entries;
amdgpu_bo_list_get_list(p->bo_list, &p->validated);
+if (p->bo_list->first_userptr != p->bo_list->num_entries)
+p->mn = amdgpu_mn_get(p->adev);
}
INIT_LIST_HEAD(&duplicates);
@@ -521,9 +525,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
if (p->uf_entry.robj)
list_add(&p->uf_entry.tv.head, &p->validated);
-if (need_mmap_lock)
-down_read(&current->mm->mmap_sem);
while (1) {
struct list_head need_pages;
unsigned i;
@@ -543,23 +544,25 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
INIT_LIST_HEAD(&need_pages);
for (i = p->bo_list->first_userptr;
i < p->bo_list->num_entries; ++i) {
+struct amdgpu_bo *bo;
e = &p->bo_list->array[i];
+bo = e->robj;
-if (amdgpu_ttm_tt_userptr_invalidated(e->robj->tbo.ttm,
+if (amdgpu_ttm_tt_userptr_invalidated(bo->tbo.ttm,
&e->user_invalidated) && e->user_pages) {
/* We acquired a page array, but somebody
* invalidated it. Free it and try again
*/
release_pages(e->user_pages,
-e->robj->tbo.ttm->num_pages,
+bo->tbo.ttm->num_pages,
false);
kvfree(e->user_pages);
e->user_pages = NULL;
}
-if (e->robj->tbo.ttm->state != tt_bound &&
+if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
!e->user_pages) {
list_del(&e->tv.head);
list_add(&e->tv.head, &need_pages);
@@ -636,9 +639,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
p->bytes_moved_vis);
-fpriv->vm.last_eviction_counter =
-atomic64_read(&p->adev->num_evictions);
if (p->bo_list) {
struct amdgpu_bo *gds = p->bo_list->gds_obj;
struct amdgpu_bo *gws = p->bo_list->gws_obj;
@@ -679,9 +679,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
error_free_pages:
-if (need_mmap_lock)
-up_read(&current->mm->mmap_sem);
if (p->bo_list) {
for (i = p->bo_list->first_userptr;
i < p->bo_list->num_entries; ++i) {
@@ -728,11 +725,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
{
unsigned i;
-if (!error)
-ttm_eu_fence_buffer_objects(&parser->ticket,
-&parser->validated,
-parser->fence);
-else if (backoff)
+if (error && backoff)
ttm_eu_backoff_reservation(&parser->ticket,
&parser->validated);
@@ -768,10 +761,6 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
if (r)
return r;
-r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_dir_update);
-if (r)
-return r;
r = amdgpu_vm_clear_freed(adev, vm, NULL);
if (r)
return r;
@@ -825,7 +814,13 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
}
-r = amdgpu_vm_clear_moved(adev, vm, &p->job->sync);
+r = amdgpu_vm_handle_moved(adev, vm);
+if (r)
+return r;
+r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update);
+if (r)
+return r;
if (amdgpu_vm_debug && p->bo_list) {
/* Invalidate all BOs to test for userspace bugs */
@@ -835,7 +830,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
if (!bo)
continue;
-amdgpu_vm_bo_invalidate(adev, bo);
+amdgpu_vm_bo_invalidate(adev, bo, false);
}
}
@@ -860,7 +855,7 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
}
if (p->job->vm) {
-p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->root.bo);
+p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->root.base.bo);
r = amdgpu_bo_vm_update_pte(p);
if (r)
@@ -928,11 +923,11 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
uint64_t offset;
uint8_t *kptr;
-m = amdgpu_cs_find_mapping(parser, chunk_ib->va_start,
-&aobj);
-if (!aobj) {
+r = amdgpu_cs_find_mapping(parser, chunk_ib->va_start,
+&aobj, &m);
+if (r) {
DRM_ERROR("IB va_start is invalid\n");
-return -EINVAL;
+return r;
}
if ((chunk_ib->va_start + chunk_ib->ib_bytes) >
@@ -1133,14 +1128,31 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
struct amdgpu_ring *ring = p->job->ring;
struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
struct amdgpu_job *job;
+unsigned i;
+uint64_t seq;
int r;
+amdgpu_mn_lock(p->mn);
+if (p->bo_list) {
+for (i = p->bo_list->first_userptr;
+i < p->bo_list->num_entries; ++i) {
+struct amdgpu_bo *bo = p->bo_list->array[i].robj;
+if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
+amdgpu_mn_unlock(p->mn);
+return -ERESTARTSYS;
+}
+}
+}
job = p->job;
p->job = NULL;
r = amd_sched_job_init(&job->base, &ring->sched, entity, p->filp);
if (r) {
amdgpu_job_free(job);
+amdgpu_mn_unlock(p->mn);
return r;
}
@@ -1148,14 +1160,28 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
job->fence_ctx = entity->fence_context;
p->fence = dma_fence_get(&job->base.s_fence->finished);
+r = amdgpu_ctx_add_fence(p->ctx, ring, p->fence, &seq);
+if (r) {
+dma_fence_put(p->fence);
+dma_fence_put(&job->base.s_fence->finished);
+amdgpu_job_free(job);
+amdgpu_mn_unlock(p->mn);
+return r;
+}
amdgpu_cs_post_dependencies(p);
-cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
-job->uf_sequence = cs->out.handle;
+cs->out.handle = seq;
+job->uf_sequence = seq;
amdgpu_job_free_resources(job);
trace_amdgpu_cs_ioctl(job);
amd_sched_entity_push_job(&job->base);
+ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
+amdgpu_mn_unlock(p->mn);
return 0;
}
@@ -1383,6 +1409,7 @@ static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
array[i] = fence;
} else { /* NULL, the fence has been already signaled */
r = 1;
+first = i;
goto out;
}
}
@@ -1462,78 +1489,36 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
* virtual memory address. Returns allocation structure when found, NULL
* otherwise.
*/
-struct amdgpu_bo_va_mapping *
-amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
-uint64_t addr, struct amdgpu_bo **bo)
+int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
+uint64_t addr, struct amdgpu_bo **bo,
+struct amdgpu_bo_va_mapping **map)
{
+struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
+struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_va_mapping *mapping;
-unsigned i;
-if (!parser->bo_list)
-return NULL;
+int r;
addr /= AMDGPU_GPU_PAGE_SIZE;
-for (i = 0; i < parser->bo_list->num_entries; i++) {
-struct amdgpu_bo_list_entry *lobj;
-lobj = &parser->bo_list->array[i];
-if (!lobj->bo_va)
-continue;
-list_for_each_entry(mapping, &lobj->bo_va->valids, list) {
-if (mapping->start > addr ||
-addr > mapping->last)
-continue;
-*bo = lobj->bo_va->base.bo;
-return mapping;
-}
-list_for_each_entry(mapping, &lobj->bo_va->invalids, list) {
-if (mapping->start > addr ||
-addr > mapping->last)
-continue;
-*bo = lobj->bo_va->base.bo;
-return mapping;
-}
-}
-return NULL;
-}
-/**
- * amdgpu_cs_sysvm_access_required - make BOs accessible by the system VM
- *
- * @parser: command submission parser context
- *
- * Helper for UVD/VCE VM emulation, make sure BOs are accessible by the system VM.
- */
-int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser)
-{
-unsigned i;
-int r;
-if (!parser->bo_list)
-return 0;
-for (i = 0; i < parser->bo_list->num_entries; i++) {
-struct amdgpu_bo *bo = parser->bo_list->array[i].robj;
-r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
-if (unlikely(r))
-return r;
-if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
-continue;
-bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
-amdgpu_ttm_placement_from_domain(bo, bo->allowed_domains);
-r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
-if (unlikely(r))
-return r;
-}
-return 0;
+mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
+if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
+return -EINVAL;
+*bo = mapping->bo_va->base.bo;
+*map = mapping;
+/* Double check that the BO is reserved by this CS */
+if (READ_ONCE((*bo)->tbo.resv->lock.ctx) != &parser->ticket)
+return -EINVAL;
+r = amdgpu_ttm_bind(&(*bo)->tbo, &(*bo)->tbo.mem);
+if (unlikely(r))
+return r;
+if ((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
+return 0;
+(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+amdgpu_ttm_placement_from_domain(*bo, (*bo)->allowed_domains);
+return ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, false, false);
}
@@ -246,8 +246,8 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
return 0;
}
-uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
-struct dma_fence *fence)
+int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
+struct dma_fence *fence, uint64_t* handler)
{
struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
uint64_t seq = cring->sequence;
@@ -258,9 +258,9 @@ uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
other = cring->fences[idx];
if (other) {
signed long r;
-r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
+r = dma_fence_wait_timeout(other, true, MAX_SCHEDULE_TIMEOUT);
if (r < 0)
-DRM_ERROR("Error (%ld) waiting for fence!\n", r);
+return r;
}
dma_fence_get(fence);
@@ -271,8 +271,10 @@ uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
spin_unlock(&ctx->ring_lock);
dma_fence_put(other);
+if (handler)
+*handler = seq;
-return seq;
+return 0;
}
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
...
@@ -65,6 +65,7 @@ MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev);
+static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev);
static const char *amdgpu_asic_name[] = {
"TAHITI",
@@ -402,6 +403,15 @@ void amdgpu_pci_config_reset(struct amdgpu_device *adev)
*/
static int amdgpu_doorbell_init(struct amdgpu_device *adev)
{
+/* No doorbell on SI hardware generation */
+if (adev->asic_type < CHIP_BONAIRE) {
+adev->doorbell.base = 0;
+adev->doorbell.size = 0;
+adev->doorbell.num_doorbells = 0;
+adev->doorbell.ptr = NULL;
+return 0;
+}
/* doorbell bar mapping */
adev->doorbell.base = pci_resource_start(adev->pdev, 2);
adev->doorbell.size = pci_resource_len(adev->pdev, 2);
@@ -887,6 +897,20 @@ static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
return r;
}
+static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev,
+struct device_attribute *attr,
+char *buf)
+{
+struct drm_device *ddev = dev_get_drvdata(dev);
+struct amdgpu_device *adev = ddev->dev_private;
+struct atom_context *ctx = adev->mode_info.atom_context;
+return snprintf(buf, PAGE_SIZE, "%s\n", ctx->vbios_version);
+}
+static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version,
+NULL);
/**
* amdgpu_atombios_fini - free the driver info and callbacks for atombios
*
@@ -906,6 +930,7 @@ static void amdgpu_atombios_fini(struct amdgpu_device *adev)
adev->mode_info.atom_context = NULL;
kfree(adev->mode_info.atom_card_info);
adev->mode_info.atom_card_info = NULL;
+device_remove_file(adev->dev, &dev_attr_vbios_version);
}
/**
@@ -922,6 +947,7 @@ static int amdgpu_atombios_init(struct amdgpu_device *adev)
{
struct card_info *atom_card_info =
kzalloc(sizeof(struct card_info), GFP_KERNEL);
+int ret;
if (!atom_card_info)
return -ENOMEM;
@@ -958,6 +984,13 @@ static int amdgpu_atombios_init(struct amdgpu_device *adev)
amdgpu_atombios_scratch_regs_init(adev);
amdgpu_atombios_allocate_fb_scratch(adev);
}
+ret = device_create_file(adev->dev, &dev_attr_vbios_version);
+if (ret) {
+DRM_ERROR("Failed to create device file for VBIOS version\n");
+return ret;
+}
return 0;
}
@@ -1757,10 +1790,8 @@ static int amdgpu_fini(struct amdgpu_device *adev)
adev->ip_blocks[i].status.late_initialized = false;
}
-if (amdgpu_sriov_vf(adev)) {
-amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);
+if (amdgpu_sriov_vf(adev))
amdgpu_virt_release_full_gpu(adev, false);
-}
return 0;
}
@@ -2051,9 +2082,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
-if (adev->asic_type >= CHIP_BONAIRE)
-/* doorbell bar mapping */
-amdgpu_doorbell_init(adev);
+/* doorbell bar mapping */
+amdgpu_doorbell_init(adev);
/* io port mapping */
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
@@ -2201,6 +2231,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (r)
DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
+r = amdgpu_debugfs_vbios_dump_init(adev);
+if (r)
+DRM_ERROR("Creating vbios dump debugfs failed (%d).\n", r);
if ((amdgpu_testing & 1)) {
if (adev->accel_working)
amdgpu_test_moves(adev);
@@ -2276,8 +2310,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
adev->rio_mem = NULL;
iounmap(adev->rmmio);
adev->rmmio = NULL;
-if (adev->asic_type >= CHIP_BONAIRE)
-amdgpu_doorbell_fini(adev);
+amdgpu_doorbell_fini(adev);
amdgpu_debugfs_regs_cleanup(adev);
}
@@ -2546,7 +2579,8 @@ static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
-(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)) {
+(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
+adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
if (adev->ip_blocks[i].status.hang) {
DRM_INFO("Some block need full reset!\n");
return true;
@@ -2654,7 +2688,7 @@ int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job)
mutex_lock(&adev->virt.lock_reset);
atomic_inc(&adev->gpu_reset_counter);
-adev->gfx.in_reset = true;
+adev->in_sriov_reset = true;
/* block TTM */
resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
@@ -2765,7 +2799,7 @@ int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job)
dev_info(adev->dev, "GPU reset successed!\n");
}
-adev->gfx.in_reset = false;
+adev->in_sriov_reset = false;
mutex_unlock(&adev->virt.lock_reset);
return r;
}
@@ -3463,10 +3497,7 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
valuesize = sizeof(values);
if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
-r = adev->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, idx, &values[0], &valuesize);
-else if (adev->pm.funcs && adev->pm.funcs->read_sensor)
-r = adev->pm.funcs->read_sensor(adev, idx, &values[0],
-&valuesize);
+r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
else
return -EINVAL;
@@ -3754,6 +3785,28 @@ int amdgpu_debugfs_init(struct drm_minor *minor)
{
return 0;
}
+static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data)
+{
+struct drm_info_node *node = (struct drm_info_node *) m->private;
+struct drm_device *dev = node->minor->dev;
+struct amdgpu_device *adev = dev->dev_private;
+seq_write(m, adev->bios, adev->bios_size);
+return 0;
+}
+static const struct drm_info_list amdgpu_vbios_dump_list[] = {
+{"amdgpu_vbios",
+amdgpu_debugfs_get_vbios_dump,
+0, NULL},
+};
+static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev)
+{
+return amdgpu_debugfs_add_files(adev,
+amdgpu_vbios_dump_list, 1);
+}
#else
static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev)
{
@@ -3763,5 +3816,9 @@ static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
return 0;
}
+static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev)
+{
+return 0;
+}
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
#endif
@@ -960,8 +960,10 @@ u8 amdgpu_encode_pci_lane_width(u32 lanes)
}
struct amd_vce_state*
-amdgpu_get_vce_clock_state(struct amdgpu_device *adev, unsigned idx)
+amdgpu_get_vce_clock_state(void *handle, u32 idx)
{
+struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (idx < adev->pm.dpm.num_of_vce_states)
return &adev->pm.dpm.vce_states[idx];
...
@@ -69,9 +69,10 @@
* - 3.17.0 - Add AMDGPU_NUM_VRAM_CPU_PAGE_FAULTS.
* - 3.18.0 - Export gpu always on cu bitmap
* - 3.19.0 - Add support for UVD MJPEG decode
+* - 3.20.0 - Add support for local BOs
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 19
+#define KMS_DRIVER_MINOR 20
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit = 0;
@@ -91,7 +92,7 @@ int amdgpu_dpm = -1;
int amdgpu_fw_load_type = -1;
int amdgpu_aspm = -1;
int amdgpu_runtime_pm = -1;
-unsigned amdgpu_ip_block_mask = 0xffffffff;
+uint amdgpu_ip_block_mask = 0xffffffff;
int amdgpu_bapm = -1;
int amdgpu_deep_color = 0;
int amdgpu_vm_size = -1;
@@ -106,14 +107,14 @@ int amdgpu_sched_jobs = 32;
int amdgpu_sched_hw_submission = 2;
int amdgpu_no_evict = 0;
int amdgpu_direct_gma_size = 0;
-unsigned amdgpu_pcie_gen_cap = 0;
-unsigned amdgpu_pcie_lane_cap = 0;
-unsigned amdgpu_cg_mask = 0xffffffff;
-unsigned amdgpu_pg_mask = 0xffffffff;
-unsigned amdgpu_sdma_phase_quantum = 32;
+uint amdgpu_pcie_gen_cap = 0;
+uint amdgpu_pcie_lane_cap = 0;
+uint amdgpu_cg_mask = 0xffffffff;
+uint amdgpu_pg_mask = 0xffffffff;
+uint amdgpu_sdma_phase_quantum = 32;
char *amdgpu_disable_cu = NULL;
char *amdgpu_virtual_display = NULL;
-unsigned amdgpu_pp_feature_mask = 0xffffffff;
+uint amdgpu_pp_feature_mask = 0xffffffff;
int amdgpu_ngg = 0;
int amdgpu_prim_buf_per_se = 0;
int amdgpu_pos_buf_per_se = 0;
@@ -608,6 +609,8 @@ amdgpu_pci_remove(struct pci_dev *pdev)
drm_dev_unregister(dev);
drm_dev_unref(dev);
+pci_disable_device(pdev);
+pci_set_drvdata(pdev, NULL);
}
static void
@@ -852,6 +855,7 @@ static struct drm_driver kms_driver = {
.gem_prime_import_sg_table = amdgpu_gem_prime_import_sg_table,
.gem_prime_vmap = amdgpu_gem_prime_vmap,
.gem_prime_vunmap = amdgpu_gem_prime_vunmap,
+.gem_prime_mmap = amdgpu_gem_prime_mmap,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
...
@@ -149,7 +149,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
AMDGPU_GEM_CREATE_VRAM_CLEARED,
-true, &gobj);
+true, NULL, &gobj);
if (ret) {
pr_err("failed to allocate framebuffer (%d)\n", aligned_size);
return -ENOMEM;
@@ -303,10 +303,10 @@ static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfb
if (rfb->obj) {
amdgpufb_destroy_pinned_object(rfb->obj);
rfb->obj = NULL;
-drm_framebuffer_unregister_private(&rfb->base);
-drm_framebuffer_cleanup(&rfb->base);
}
drm_fb_helper_fini(&rfbdev->helper);
+drm_framebuffer_unregister_private(&rfb->base);
+drm_framebuffer_cleanup(&rfb->base);
return 0;
}
...
@@ -44,11 +44,12 @@ void amdgpu_gem_object_free(struct drm_gem_object *gobj)
}
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
int alignment, u32 initial_domain,
u64 flags, bool kernel,
-struct drm_gem_object **obj)
+struct reservation_object *resv,
+struct drm_gem_object **obj)
{
-struct amdgpu_bo *robj;
+struct amdgpu_bo *bo;
int r;
*obj = NULL;
@@ -59,7 +60,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
retry:
r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
-flags, NULL, NULL, 0, &robj);
+flags, NULL, resv, 0, &bo);
if (r) {
if (r != -ERESTARTSYS) {
if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
@@ -71,7 +72,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
}
return r;
}
-*obj = &robj->gem_base;
+*obj = &bo->gem_base;
return 0;
}
@@ -112,7 +113,17 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj,
struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_va *bo_va;
+struct mm_struct *mm;
int r;
+mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
+if (mm && mm != current->mm)
+return -EPERM;
+if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
+abo->tbo.resv != vm->root.base.bo->tbo.resv)
+return -EPERM;
r = amdgpu_bo_reserve(abo, false);
if (r)
return r;
...@@ -127,35 +138,6 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, ...@@ -127,35 +138,6 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj,
return 0; return 0;
} }
static int amdgpu_gem_vm_check(void *param, struct amdgpu_bo *bo)
{
/* if anything is swapped out don't swap it in here,
just abort and wait for the next CS */
if (!amdgpu_bo_gpu_accessible(bo))
return -ERESTARTSYS;
if (bo->shadow && !amdgpu_bo_gpu_accessible(bo->shadow))
return -ERESTARTSYS;
return 0;
}
static bool amdgpu_gem_vm_ready(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct list_head *list)
{
struct ttm_validate_buffer *entry;
list_for_each_entry(entry, list, head) {
struct amdgpu_bo *bo =
container_of(entry->bo, struct amdgpu_bo, tbo);
if (amdgpu_gem_vm_check(NULL, bo))
return false;
}
return !amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_gem_vm_check, NULL);
}
void amdgpu_gem_object_close(struct drm_gem_object *obj, void amdgpu_gem_object_close(struct drm_gem_object *obj,
struct drm_file *file_priv) struct drm_file *file_priv)
{ {
...@@ -165,13 +147,14 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj, ...@@ -165,13 +147,14 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
struct amdgpu_vm *vm = &fpriv->vm; struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_list_entry vm_pd; struct amdgpu_bo_list_entry vm_pd;
struct list_head list; struct list_head list, duplicates;
struct ttm_validate_buffer tv; struct ttm_validate_buffer tv;
struct ww_acquire_ctx ticket; struct ww_acquire_ctx ticket;
struct amdgpu_bo_va *bo_va; struct amdgpu_bo_va *bo_va;
int r; int r;
INIT_LIST_HEAD(&list); INIT_LIST_HEAD(&list);
INIT_LIST_HEAD(&duplicates);
tv.bo = &bo->tbo; tv.bo = &bo->tbo;
tv.shared = true; tv.shared = true;
...@@ -179,7 +162,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj, ...@@ -179,7 +162,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
amdgpu_vm_get_pd_bo(vm, &list, &vm_pd); amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL); r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
if (r) { if (r) {
dev_err(adev->dev, "leaking bo va because " dev_err(adev->dev, "leaking bo va because "
"we fail to reserve bo (%d)\n", r); "we fail to reserve bo (%d)\n", r);
...@@ -189,7 +172,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj, ...@@ -189,7 +172,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
if (bo_va && --bo_va->ref_count == 0) { if (bo_va && --bo_va->ref_count == 0) {
amdgpu_vm_bo_rmv(adev, bo_va); amdgpu_vm_bo_rmv(adev, bo_va);
if (amdgpu_gem_vm_ready(adev, vm, &list)) { if (amdgpu_vm_ready(vm)) {
struct dma_fence *fence = NULL; struct dma_fence *fence = NULL;
r = amdgpu_vm_clear_freed(adev, vm, &fence); r = amdgpu_vm_clear_freed(adev, vm, &fence);
...@@ -214,18 +197,22 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, ...@@ -214,18 +197,22 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp) struct drm_file *filp)
{ {
struct amdgpu_device *adev = dev->dev_private; struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_fpriv *fpriv = filp->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
union drm_amdgpu_gem_create *args = data; union drm_amdgpu_gem_create *args = data;
uint64_t flags = args->in.domain_flags;
uint64_t size = args->in.bo_size; uint64_t size = args->in.bo_size;
struct reservation_object *resv = NULL;
struct drm_gem_object *gobj; struct drm_gem_object *gobj;
uint32_t handle; uint32_t handle;
bool kernel = false;
int r; int r;
/* reject invalid gem flags */ /* reject invalid gem flags */
if (args->in.domain_flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_NO_CPU_ACCESS | AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
AMDGPU_GEM_CREATE_CPU_GTT_USWC | AMDGPU_GEM_CREATE_CPU_GTT_USWC |
AMDGPU_GEM_CREATE_VRAM_CLEARED)) AMDGPU_GEM_CREATE_VRAM_CLEARED |
AMDGPU_GEM_CREATE_VM_ALWAYS_VALID))
return -EINVAL; return -EINVAL;
/* reject invalid gem domains */ /* reject invalid gem domains */
...@@ -240,7 +227,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, ...@@ -240,7 +227,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
/* create a gem object to contain this object in */ /* create a gem object to contain this object in */
if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS | if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) { AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
kernel = true; flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS) if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
size = size << AMDGPU_GDS_SHIFT; size = size << AMDGPU_GDS_SHIFT;
else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS) else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
...@@ -252,10 +239,25 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, ...@@ -252,10 +239,25 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
} }
size = roundup(size, PAGE_SIZE); size = roundup(size, PAGE_SIZE);
if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
r = amdgpu_bo_reserve(vm->root.base.bo, false);
if (r)
return r;
resv = vm->root.base.bo->tbo.resv;
}
r = amdgpu_gem_object_create(adev, size, args->in.alignment, r = amdgpu_gem_object_create(adev, size, args->in.alignment,
(u32)(0xffffffff & args->in.domains), (u32)(0xffffffff & args->in.domains),
args->in.domain_flags, flags, false, resv, &gobj);
kernel, &gobj); if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
if (!r) {
struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
abo->parent = amdgpu_bo_ref(vm->root.base.bo);
}
amdgpu_bo_unreserve(vm->root.base.bo);
}
if (r) if (r)
return r; return r;
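
With the hunk above, the GEM create ioctl accepts the new AMDGPU_GEM_CREATE_VM_ALWAYS_VALID flag and, when it is set, allocates the BO against the reservation object of the VM's root page directory. A minimal userspace sketch of requesting such a per-VM BO follows; the include paths, the fd and the sizes are illustrative and not part of this commit. Note the companion change in the prime export path further below: a BO created this way can no longer be exported.

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <amdgpu_drm.h>

/* Hedged sketch: create a per-VM BO by passing the new flag. */
static int example_create_per_vm_bo(int fd, uint32_t *handle)
{
	union drm_amdgpu_gem_create req;

	memset(&req, 0, sizeof(req));
	req.in.bo_size      = 1 << 20;   /* 1 MiB, illustrative */
	req.in.alignment    = 4096;
	req.in.domains      = AMDGPU_GEM_DOMAIN_VRAM;
	req.in.domain_flags = AMDGPU_GEM_CREATE_VM_ALWAYS_VALID;

	if (drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_CREATE, &req))
		return -errno;

	/* The BO shares the VM's reservation object and stays valid in
	 * this VM; it cannot be shared with other processes. */
	*handle = req.out.handle;
	return 0;
}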
...@@ -297,9 +299,8 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, ...@@ -297,9 +299,8 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
} }
/* create a gem object to contain this object in */ /* create a gem object to contain this object in */
r = amdgpu_gem_object_create(adev, args->size, 0, r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
AMDGPU_GEM_DOMAIN_CPU, 0, 0, 0, NULL, &gobj);
0, &gobj);
if (r) if (r)
return r; return r;
...@@ -317,8 +318,6 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, ...@@ -317,8 +318,6 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
} }
if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) { if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
down_read(&current->mm->mmap_sem);
r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
bo->tbo.ttm->pages); bo->tbo.ttm->pages);
if (r) if (r)
...@@ -333,8 +332,6 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, ...@@ -333,8 +332,6 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
amdgpu_bo_unreserve(bo); amdgpu_bo_unreserve(bo);
if (r) if (r)
goto free_pages; goto free_pages;
up_read(&current->mm->mmap_sem);
} }
r = drm_gem_handle_create(filp, gobj, &handle); r = drm_gem_handle_create(filp, gobj, &handle);
...@@ -511,10 +508,10 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, ...@@ -511,10 +508,10 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
struct list_head *list, struct list_head *list,
uint32_t operation) uint32_t operation)
{ {
int r = -ERESTARTSYS; int r;
if (!amdgpu_gem_vm_ready(adev, vm, list)) if (!amdgpu_vm_ready(vm))
goto error; return;
r = amdgpu_vm_update_directories(adev, vm); r = amdgpu_vm_update_directories(adev, vm);
if (r) if (r)
...@@ -551,7 +548,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, ...@@ -551,7 +548,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct amdgpu_bo_list_entry vm_pd; struct amdgpu_bo_list_entry vm_pd;
struct ttm_validate_buffer tv; struct ttm_validate_buffer tv;
struct ww_acquire_ctx ticket; struct ww_acquire_ctx ticket;
struct list_head list; struct list_head list, duplicates;
uint64_t va_flags; uint64_t va_flags;
int r = 0; int r = 0;
...@@ -587,6 +584,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, ...@@ -587,6 +584,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
} }
INIT_LIST_HEAD(&list); INIT_LIST_HEAD(&list);
INIT_LIST_HEAD(&duplicates);
if ((args->operation != AMDGPU_VA_OP_CLEAR) && if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
!(args->flags & AMDGPU_VM_PAGE_PRT)) { !(args->flags & AMDGPU_VM_PAGE_PRT)) {
gobj = drm_gem_object_lookup(filp, args->handle); gobj = drm_gem_object_lookup(filp, args->handle);
...@@ -603,7 +601,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, ...@@ -603,7 +601,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd); amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL); r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
if (r) if (r)
goto error_unref; goto error_unref;
...@@ -669,6 +667,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, ...@@ -669,6 +667,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp) struct drm_file *filp)
{ {
struct amdgpu_device *adev = dev->dev_private;
struct drm_amdgpu_gem_op *args = data; struct drm_amdgpu_gem_op *args = data;
struct drm_gem_object *gobj; struct drm_gem_object *gobj;
struct amdgpu_bo *robj; struct amdgpu_bo *robj;
...@@ -716,6 +715,9 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, ...@@ -716,6 +715,9 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM) if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT; robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
amdgpu_vm_bo_invalidate(adev, robj, true);
amdgpu_bo_unreserve(robj); amdgpu_bo_unreserve(robj);
break; break;
default: default:
...@@ -745,8 +747,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv, ...@@ -745,8 +747,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
r = amdgpu_gem_object_create(adev, args->size, 0, r = amdgpu_gem_object_create(adev, args->size, 0,
AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
ttm_bo_type_device, false, NULL, &gobj);
&gobj);
if (r) if (r)
return -ENOMEM; return -ENOMEM;
......
...@@ -260,8 +260,13 @@ int amdgpu_gfx_compute_mqd_sw_init(struct amdgpu_device *adev, ...@@ -260,8 +260,13 @@ int amdgpu_gfx_compute_mqd_sw_init(struct amdgpu_device *adev,
/* create MQD for KIQ */ /* create MQD for KIQ */
ring = &adev->gfx.kiq.ring; ring = &adev->gfx.kiq.ring;
if (!ring->mqd_obj) { if (!ring->mqd_obj) {
/* Originally the KIQ MQD was placed in the GTT domain, but for SR-IOV the
 * VRAM domain is a must: otherwise the hypervisor triggers a SAVE_VF failure
 * after the driver is unloaded, because the MQD has been deallocated and the
 * GART unbound. To avoid diverging code paths we use the VRAM domain for the
 * KIQ MQD on both SR-IOV and bare-metal.
 */
r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE, r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj, AMDGPU_GEM_DOMAIN_VRAM, &ring->mqd_obj,
&ring->mqd_gpu_addr, &ring->mqd_ptr); &ring->mqd_gpu_addr, &ring->mqd_ptr);
if (r) { if (r) {
dev_warn(adev->dev, "failed to create ring mqd ob (%d)", r); dev_warn(adev->dev, "failed to create ring mqd ob (%d)", r);
......
...@@ -169,6 +169,12 @@ int amdgpu_ih_process(struct amdgpu_device *adev) ...@@ -169,6 +169,12 @@ int amdgpu_ih_process(struct amdgpu_device *adev)
while (adev->irq.ih.rptr != wptr) { while (adev->irq.ih.rptr != wptr) {
u32 ring_index = adev->irq.ih.rptr >> 2; u32 ring_index = adev->irq.ih.rptr >> 2;
/* Prescreening of high-frequency interrupts */
if (!amdgpu_ih_prescreen_iv(adev)) {
adev->irq.ih.rptr &= adev->irq.ih.ptr_mask;
continue;
}
/* Before dispatching irq to IP blocks, send it to amdkfd */ /* Before dispatching irq to IP blocks, send it to amdkfd */
amdgpu_amdkfd_interrupt(adev, amdgpu_amdkfd_interrupt(adev,
(const void *) &adev->irq.ih.ring[ring_index]); (const void *) &adev->irq.ih.ring[ring_index]);
...@@ -190,3 +196,79 @@ int amdgpu_ih_process(struct amdgpu_device *adev) ...@@ -190,3 +196,79 @@ int amdgpu_ih_process(struct amdgpu_device *adev)
return IRQ_HANDLED; return IRQ_HANDLED;
} }
/**
* amdgpu_ih_add_fault - Add a page fault record
*
* @adev: amdgpu device pointer
* @key: 64-bit encoding of PASID and address
*
* This should be called when a retry page fault interrupt is
* received. If this is a new page fault, it will be added to a hash
* table. The return value indicates whether this is a new fault, or
* a fault that was already known and is already being handled.
*
* If there are too many pending page faults, this will fail. Retry
* interrupts should be ignored in this case until there is enough
* free space.
*
* Returns 0 if the fault was added, 1 if the fault was already known,
* -ENOSPC if there are too many pending faults.
*/
int amdgpu_ih_add_fault(struct amdgpu_device *adev, u64 key)
{
unsigned long flags;
int r = -ENOSPC;
if (WARN_ON_ONCE(!adev->irq.ih.faults))
/* Should be allocated in <IP>_ih_sw_init on GPUs that
* support retry faults and require retry filtering.
*/
return r;
spin_lock_irqsave(&adev->irq.ih.faults->lock, flags);
/* Only let the hash table fill up to 50% for best performance */
if (adev->irq.ih.faults->count >= (1 << (AMDGPU_PAGEFAULT_HASH_BITS-1)))
goto unlock_out;
r = chash_table_copy_in(&adev->irq.ih.faults->hash, key, NULL);
if (!r)
adev->irq.ih.faults->count++;
/* chash_table_copy_in should never fail unless we're losing count */
WARN_ON_ONCE(r < 0);
unlock_out:
spin_unlock_irqrestore(&adev->irq.ih.faults->lock, flags);
return r;
}
/**
* amdgpu_ih_clear_fault - Remove a page fault record
*
* @adev: amdgpu device pointer
* @key: 64-bit encoding of PASID and address
*
* This should be called when a page fault has been handled. Any
* future interrupt with this key will be processed as a new
* page fault.
*/
void amdgpu_ih_clear_fault(struct amdgpu_device *adev, u64 key)
{
unsigned long flags;
int r;
if (!adev->irq.ih.faults)
return;
spin_lock_irqsave(&adev->irq.ih.faults->lock, flags);
r = chash_table_remove(&adev->irq.ih.faults->hash, key, NULL);
if (!WARN_ON_ONCE(r < 0)) {
adev->irq.ih.faults->count--;
WARN_ON_ONCE(adev->irq.ih.faults->count < 0);
}
spin_unlock_irqrestore(&adev->irq.ih.faults->lock, flags);
}
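
The two helpers above form a simple dedup filter for retry page faults: the prescreen path records a fault key before dispatching it, and whoever resolves the fault clears the key again. A minimal sketch of how an IP-specific prescreen callback might use them follows; the key encoding and the pasid/addr parameters are illustrative only, not taken from this commit.

/* Hedged sketch, not part of the commit: pairing amdgpu_ih_add_fault()
 * with amdgpu_ih_clear_fault() in a prescreen-style callback. */
static bool example_prescreen_retry_fault(struct amdgpu_device *adev,
					  u16 pasid, u64 addr)
{
	/* Illustrative 64-bit key: PASID in the high bits, page frame low. */
	u64 key = ((u64)pasid << 48) | (addr >> PAGE_SHIFT);
	int r = amdgpu_ih_add_fault(adev, key);

	if (r == 1)		/* already known and being handled: drop the IV */
		return false;
	if (r == -ENOSPC)	/* too many pending faults: drop until space frees */
		return false;

	/* New fault: let it be dispatched.  The handler that eventually
	 * resolves it calls amdgpu_ih_clear_fault(adev, key) so the next
	 * retry interrupt for this page is treated as a fresh fault. */
	return true;
}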
...@@ -24,6 +24,8 @@ ...@@ -24,6 +24,8 @@
#ifndef __AMDGPU_IH_H__ #ifndef __AMDGPU_IH_H__
#define __AMDGPU_IH_H__ #define __AMDGPU_IH_H__
#include <linux/chash.h>
struct amdgpu_device; struct amdgpu_device;
/* /*
* vega10+ IH clients * vega10+ IH clients
...@@ -69,6 +71,13 @@ enum amdgpu_ih_clientid ...@@ -69,6 +71,13 @@ enum amdgpu_ih_clientid
#define AMDGPU_IH_CLIENTID_LEGACY 0 #define AMDGPU_IH_CLIENTID_LEGACY 0
#define AMDGPU_PAGEFAULT_HASH_BITS 8
struct amdgpu_retryfault_hashtable {
DECLARE_CHASH_TABLE(hash, AMDGPU_PAGEFAULT_HASH_BITS, 8, 0);
spinlock_t lock;
int count;
};
/* /*
* R6xx+ IH ring * R6xx+ IH ring
*/ */
...@@ -87,6 +96,7 @@ struct amdgpu_ih_ring { ...@@ -87,6 +96,7 @@ struct amdgpu_ih_ring {
bool use_doorbell; bool use_doorbell;
bool use_bus_addr; bool use_bus_addr;
dma_addr_t rb_dma_addr; /* only used when use_bus_addr = true */ dma_addr_t rb_dma_addr; /* only used when use_bus_addr = true */
struct amdgpu_retryfault_hashtable *faults;
}; };
#define AMDGPU_IH_SRC_DATA_MAX_SIZE_DW 4 #define AMDGPU_IH_SRC_DATA_MAX_SIZE_DW 4
...@@ -109,5 +119,7 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size, ...@@ -109,5 +119,7 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
bool use_bus_addr); bool use_bus_addr);
void amdgpu_ih_ring_fini(struct amdgpu_device *adev); void amdgpu_ih_ring_fini(struct amdgpu_device *adev);
int amdgpu_ih_process(struct amdgpu_device *adev); int amdgpu_ih_process(struct amdgpu_device *adev);
int amdgpu_ih_add_fault(struct amdgpu_device *adev, u64 key);
void amdgpu_ih_clear_fault(struct amdgpu_device *adev, u64 key);
#endif #endif
...@@ -825,7 +825,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) ...@@ -825,7 +825,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
} }
r = amdgpu_vm_init(adev, &fpriv->vm, r = amdgpu_vm_init(adev, &fpriv->vm,
AMDGPU_VM_CONTEXT_GFX); AMDGPU_VM_CONTEXT_GFX, 0);
if (r) { if (r) {
kfree(fpriv); kfree(fpriv);
goto out_suspend; goto out_suspend;
...@@ -841,8 +841,11 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) ...@@ -841,8 +841,11 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
if (amdgpu_sriov_vf(adev)) { if (amdgpu_sriov_vf(adev)) {
r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va); r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va);
if (r) if (r) {
amdgpu_vm_fini(adev, &fpriv->vm);
kfree(fpriv);
goto out_suspend; goto out_suspend;
}
} }
mutex_init(&fpriv->bo_list_lock); mutex_init(&fpriv->bo_list_lock);
......
...@@ -50,8 +50,10 @@ struct amdgpu_mn { ...@@ -50,8 +50,10 @@ struct amdgpu_mn {
struct hlist_node node; struct hlist_node node;
/* objects protected by lock */ /* objects protected by lock */
struct mutex lock; struct rw_semaphore lock;
struct rb_root_cached objects; struct rb_root_cached objects;
struct mutex read_lock;
atomic_t recursion;
}; };
struct amdgpu_mn_node { struct amdgpu_mn_node {
...@@ -74,7 +76,7 @@ static void amdgpu_mn_destroy(struct work_struct *work) ...@@ -74,7 +76,7 @@ static void amdgpu_mn_destroy(struct work_struct *work)
struct amdgpu_bo *bo, *next_bo; struct amdgpu_bo *bo, *next_bo;
mutex_lock(&adev->mn_lock); mutex_lock(&adev->mn_lock);
mutex_lock(&rmn->lock); down_write(&rmn->lock);
hash_del(&rmn->node); hash_del(&rmn->node);
rbtree_postorder_for_each_entry_safe(node, next_node, rbtree_postorder_for_each_entry_safe(node, next_node,
&rmn->objects.rb_root, it.rb) { &rmn->objects.rb_root, it.rb) {
...@@ -84,7 +86,7 @@ static void amdgpu_mn_destroy(struct work_struct *work) ...@@ -84,7 +86,7 @@ static void amdgpu_mn_destroy(struct work_struct *work)
} }
kfree(node); kfree(node);
} }
mutex_unlock(&rmn->lock); up_write(&rmn->lock);
mutex_unlock(&adev->mn_lock); mutex_unlock(&adev->mn_lock);
mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm); mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm);
kfree(rmn); kfree(rmn);
...@@ -106,6 +108,53 @@ static void amdgpu_mn_release(struct mmu_notifier *mn, ...@@ -106,6 +108,53 @@ static void amdgpu_mn_release(struct mmu_notifier *mn,
schedule_work(&rmn->work); schedule_work(&rmn->work);
} }
/**
* amdgpu_mn_lock - take the write side lock for this mn
*/
void amdgpu_mn_lock(struct amdgpu_mn *mn)
{
if (mn)
down_write(&mn->lock);
}
/**
* amdgpu_mn_unlock - drop the write side lock for this mn
*/
void amdgpu_mn_unlock(struct amdgpu_mn *mn)
{
if (mn)
up_write(&mn->lock);
}
/**
* amdgpu_mn_read_lock - take the rmn read lock
*
* @rmn: our notifier
*
* Take the rmn read side lock.
*/
static void amdgpu_mn_read_lock(struct amdgpu_mn *rmn)
{
mutex_lock(&rmn->read_lock);
if (atomic_inc_return(&rmn->recursion) == 1)
down_read_non_owner(&rmn->lock);
mutex_unlock(&rmn->read_lock);
}
/**
* amdgpu_mn_read_unlock - drop the rmn read lock
*
* @rmn: our notifier
*
* Drop the rmn read side lock.
*/
static void amdgpu_mn_read_unlock(struct amdgpu_mn *rmn)
{
if (atomic_dec_return(&rmn->recursion) == 0)
up_read_non_owner(&rmn->lock);
}
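
The read side uses a recursion counter so that nested invalidate_range_start notifications only take the semaphore once, while the exported amdgpu_mn_lock()/amdgpu_mn_unlock() pair lets a submission path exclude invalidations entirely. A minimal sketch of the write-side usage follows, assuming a caller that must keep user pages stable for a short critical section; the function name and flow are illustrative, not the driver's actual CS code.

/* Hedged sketch, not part of the commit: excluding MMU invalidations
 * while committing user pages for a submission. */
static int example_commit_user_pages(struct amdgpu_device *adev,
				     struct amdgpu_bo *bo,
				     struct page **pages)
{
	struct amdgpu_mn *mn = amdgpu_mn_get(adev);	/* notifier for current->mm */

	if (IS_ERR(mn))
		return PTR_ERR(mn);

	amdgpu_mn_lock(mn);	/* readers (invalidate_range_start) now block */
	amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, pages);
	amdgpu_mn_unlock(mn);	/* invalidations may run again */

	return 0;
}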
/** /**
* amdgpu_mn_invalidate_node - unmap all BOs of a node * amdgpu_mn_invalidate_node - unmap all BOs of a node
* *
...@@ -126,23 +175,12 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node, ...@@ -126,23 +175,12 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end)) if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end))
continue; continue;
r = amdgpu_bo_reserve(bo, true);
if (r) {
DRM_ERROR("(%ld) failed to reserve user bo\n", r);
continue;
}
r = reservation_object_wait_timeout_rcu(bo->tbo.resv, r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
true, false, MAX_SCHEDULE_TIMEOUT); true, false, MAX_SCHEDULE_TIMEOUT);
if (r <= 0) if (r <= 0)
DRM_ERROR("(%ld) failed to wait for user bo\n", r); DRM_ERROR("(%ld) failed to wait for user bo\n", r);
amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU); amdgpu_ttm_tt_mark_user_pages(bo->tbo.ttm);
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
if (r)
DRM_ERROR("(%ld) failed to validate user bo\n", r);
amdgpu_bo_unreserve(bo);
} }
} }
...@@ -168,7 +206,7 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn, ...@@ -168,7 +206,7 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
/* notification is exclusive, but interval is inclusive */ /* notification is exclusive, but interval is inclusive */
end -= 1; end -= 1;
mutex_lock(&rmn->lock); amdgpu_mn_read_lock(rmn);
it = interval_tree_iter_first(&rmn->objects, start, end); it = interval_tree_iter_first(&rmn->objects, start, end);
while (it) { while (it) {
...@@ -179,13 +217,32 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn, ...@@ -179,13 +217,32 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
amdgpu_mn_invalidate_node(node, start, end); amdgpu_mn_invalidate_node(node, start, end);
} }
}
mutex_unlock(&rmn->lock); /**
* amdgpu_mn_invalidate_range_end - callback to notify about mm change
*
* @mn: our notifier
* @mm: the mm this callback is about
* @start: start of updated range
* @end: end of updated range
*
* Release the lock again to allow new command submissions.
*/
static void amdgpu_mn_invalidate_range_end(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start,
unsigned long end)
{
struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
amdgpu_mn_read_unlock(rmn);
} }
static const struct mmu_notifier_ops amdgpu_mn_ops = { static const struct mmu_notifier_ops amdgpu_mn_ops = {
.release = amdgpu_mn_release, .release = amdgpu_mn_release,
.invalidate_range_start = amdgpu_mn_invalidate_range_start, .invalidate_range_start = amdgpu_mn_invalidate_range_start,
.invalidate_range_end = amdgpu_mn_invalidate_range_end,
}; };
/** /**
...@@ -195,7 +252,7 @@ static const struct mmu_notifier_ops amdgpu_mn_ops = { ...@@ -195,7 +252,7 @@ static const struct mmu_notifier_ops amdgpu_mn_ops = {
* *
* Creates a notifier context for current->mm. * Creates a notifier context for current->mm.
*/ */
static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev) struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
{ {
struct mm_struct *mm = current->mm; struct mm_struct *mm = current->mm;
struct amdgpu_mn *rmn; struct amdgpu_mn *rmn;
...@@ -220,8 +277,10 @@ static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev) ...@@ -220,8 +277,10 @@ static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
rmn->adev = adev; rmn->adev = adev;
rmn->mm = mm; rmn->mm = mm;
rmn->mn.ops = &amdgpu_mn_ops; rmn->mn.ops = &amdgpu_mn_ops;
mutex_init(&rmn->lock); init_rwsem(&rmn->lock);
rmn->objects = RB_ROOT_CACHED; rmn->objects = RB_ROOT_CACHED;
mutex_init(&rmn->read_lock);
atomic_set(&rmn->recursion, 0);
r = __mmu_notifier_register(&rmn->mn, mm); r = __mmu_notifier_register(&rmn->mn, mm);
if (r) if (r)
...@@ -267,7 +326,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr) ...@@ -267,7 +326,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
INIT_LIST_HEAD(&bos); INIT_LIST_HEAD(&bos);
mutex_lock(&rmn->lock); down_write(&rmn->lock);
while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) { while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
kfree(node); kfree(node);
...@@ -281,7 +340,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr) ...@@ -281,7 +340,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
if (!node) { if (!node) {
node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL); node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL);
if (!node) { if (!node) {
mutex_unlock(&rmn->lock); up_write(&rmn->lock);
return -ENOMEM; return -ENOMEM;
} }
} }
...@@ -296,7 +355,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr) ...@@ -296,7 +355,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
interval_tree_insert(&node->it, &rmn->objects); interval_tree_insert(&node->it, &rmn->objects);
mutex_unlock(&rmn->lock); up_write(&rmn->lock);
return 0; return 0;
} }
...@@ -322,7 +381,7 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo) ...@@ -322,7 +381,7 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
return; return;
} }
mutex_lock(&rmn->lock); down_write(&rmn->lock);
/* save the next list entry for later */ /* save the next list entry for later */
head = bo->mn_list.next; head = bo->mn_list.next;
...@@ -337,6 +396,7 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo) ...@@ -337,6 +396,7 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
kfree(node); kfree(node);
} }
mutex_unlock(&rmn->lock); up_write(&rmn->lock);
mutex_unlock(&adev->mn_lock); mutex_unlock(&adev->mn_lock);
} }
/* /*
* Copyright 2015 Advanced Micro Devices, Inc. * Copyright 2017 Advanced Micro Devices, Inc.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
...@@ -19,20 +19,34 @@ ...@@ -19,20 +19,34 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE. * OTHER DEALINGS IN THE SOFTWARE.
* *
* Authors: Christian König
*/ */
#include "eventmgr.h" #ifndef __AMDGPU_MN_H__
#include "eventinit.h" #define __AMDGPU_MN_H__
#include "eventmanagement.h"
#include "eventmanager.h"
#include "power_state.h"
#include "hardwaremanager.h"
int psm_get_ui_state(struct pp_eventmgr *eventmgr, enum PP_StateUILabel ui_label, unsigned long *state_id); /*
* MMU Notifier
int psm_get_state_by_classification(struct pp_eventmgr *eventmgr, enum PP_StateClassificationFlag flag, unsigned long *state_id); */
struct amdgpu_mn;
int psm_set_states(struct pp_eventmgr *eventmgr, unsigned long *state_id);
int psm_adjust_power_state_dynamic(struct pp_eventmgr *eventmgr, bool skip); #if defined(CONFIG_MMU_NOTIFIER)
void amdgpu_mn_lock(struct amdgpu_mn *mn);
void amdgpu_mn_unlock(struct amdgpu_mn *mn);
struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev);
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr);
void amdgpu_mn_unregister(struct amdgpu_bo *bo);
#else
static inline void amdgpu_mn_lock(struct amdgpu_mn *mn) {}
static inline void amdgpu_mn_unlock(struct amdgpu_mn *mn) {}
static inline struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
{
return NULL;
}
static inline int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
{
return -ENODEV;
}
static inline void amdgpu_mn_unregister(struct amdgpu_bo *bo) {}
#endif
int psm_adjust_power_state_static(struct pp_eventmgr *eventmgr, bool skip); #endif
...@@ -64,11 +64,12 @@ bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo) ...@@ -64,11 +64,12 @@ bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
return false; return false;
} }
static void amdgpu_ttm_placement_init(struct amdgpu_device *adev, void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
struct ttm_placement *placement,
struct ttm_place *places,
u32 domain, u64 flags)
{ {
struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
struct ttm_placement *placement = &abo->placement;
struct ttm_place *places = abo->placements;
u64 flags = abo->flags;
u32 c = 0; u32 c = 0;
if (domain & AMDGPU_GEM_DOMAIN_VRAM) { if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
...@@ -151,27 +152,6 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev, ...@@ -151,27 +152,6 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
placement->busy_placement = places; placement->busy_placement = places;
} }
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
amdgpu_ttm_placement_init(adev, &abo->placement, abo->placements,
domain, abo->flags);
}
static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
struct ttm_placement *placement)
{
BUG_ON(placement->num_placement > (AMDGPU_GEM_DOMAIN_MAX + 1));
memcpy(bo->placements, placement->placement,
placement->num_placement * sizeof(struct ttm_place));
bo->placement.num_placement = placement->num_placement;
bo->placement.num_busy_placement = placement->num_busy_placement;
bo->placement.placement = bo->placements;
bo->placement.busy_placement = bo->placements;
}
/** /**
* amdgpu_bo_create_reserved - create reserved BO for kernel use * amdgpu_bo_create_reserved - create reserved BO for kernel use
* *
...@@ -303,14 +283,13 @@ void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr, ...@@ -303,14 +283,13 @@ void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
*cpu_addr = NULL; *cpu_addr = NULL;
} }
int amdgpu_bo_create_restricted(struct amdgpu_device *adev, static int amdgpu_bo_do_create(struct amdgpu_device *adev,
unsigned long size, int byte_align, unsigned long size, int byte_align,
bool kernel, u32 domain, u64 flags, bool kernel, u32 domain, u64 flags,
struct sg_table *sg, struct sg_table *sg,
struct ttm_placement *placement, struct reservation_object *resv,
struct reservation_object *resv, uint64_t init_value,
uint64_t init_value, struct amdgpu_bo **bo_ptr)
struct amdgpu_bo **bo_ptr)
{ {
struct amdgpu_bo *bo; struct amdgpu_bo *bo;
enum ttm_bo_type type; enum ttm_bo_type type;
...@@ -384,10 +363,11 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, ...@@ -384,10 +363,11 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC; bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#endif #endif
amdgpu_fill_placement_to_bo(bo, placement); bo->tbo.bdev = &adev->mman.bdev;
/* Kernel allocation are uninterruptible */ amdgpu_ttm_placement_from_domain(bo, domain);
initial_bytes_moved = atomic64_read(&adev->num_bytes_moved); initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
/* Kernel allocation are uninterruptible */
r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type, r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
&bo->placement, page_align, !kernel, NULL, &bo->placement, page_align, !kernel, NULL,
acc_size, sg, resv, &amdgpu_ttm_bo_destroy); acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
...@@ -442,27 +422,17 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev, ...@@ -442,27 +422,17 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
unsigned long size, int byte_align, unsigned long size, int byte_align,
struct amdgpu_bo *bo) struct amdgpu_bo *bo)
{ {
struct ttm_placement placement = {0};
struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
int r; int r;
if (bo->shadow) if (bo->shadow)
return 0; return 0;
memset(&placements, 0, sizeof(placements)); r = amdgpu_bo_do_create(adev, size, byte_align, true,
amdgpu_ttm_placement_init(adev, &placement, placements, AMDGPU_GEM_DOMAIN_GTT,
AMDGPU_GEM_DOMAIN_GTT, AMDGPU_GEM_CREATE_CPU_GTT_USWC |
AMDGPU_GEM_CREATE_CPU_GTT_USWC | AMDGPU_GEM_CREATE_SHADOW,
AMDGPU_GEM_CREATE_SHADOW); NULL, bo->tbo.resv, 0,
&bo->shadow);
r = amdgpu_bo_create_restricted(adev, size, byte_align, true,
AMDGPU_GEM_DOMAIN_GTT,
AMDGPU_GEM_CREATE_CPU_GTT_USWC |
AMDGPU_GEM_CREATE_SHADOW,
NULL, &placement,
bo->tbo.resv,
0,
&bo->shadow);
if (!r) { if (!r) {
bo->shadow->parent = amdgpu_bo_ref(bo); bo->shadow->parent = amdgpu_bo_ref(bo);
mutex_lock(&adev->shadow_list_lock); mutex_lock(&adev->shadow_list_lock);
...@@ -484,18 +454,11 @@ int amdgpu_bo_create(struct amdgpu_device *adev, ...@@ -484,18 +454,11 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
uint64_t init_value, uint64_t init_value,
struct amdgpu_bo **bo_ptr) struct amdgpu_bo **bo_ptr)
{ {
struct ttm_placement placement = {0};
struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW; uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW;
int r; int r;
memset(&placements, 0, sizeof(placements)); r = amdgpu_bo_do_create(adev, size, byte_align, kernel, domain,
amdgpu_ttm_placement_init(adev, &placement, placements, parent_flags, sg, resv, init_value, bo_ptr);
domain, parent_flags);
r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel, domain,
parent_flags, sg, &placement, resv,
init_value, bo_ptr);
if (r) if (r)
return r; return r;
...@@ -672,7 +635,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, ...@@ -672,7 +635,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
{ {
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
int r, i; int r, i;
unsigned fpfn, lpfn;
if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
return -EPERM; return -EPERM;
...@@ -704,22 +666,16 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, ...@@ -704,22 +666,16 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
} }
bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
/* force to pin into visible video ram */
if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
amdgpu_ttm_placement_from_domain(bo, domain); amdgpu_ttm_placement_from_domain(bo, domain);
for (i = 0; i < bo->placement.num_placement; i++) { for (i = 0; i < bo->placement.num_placement; i++) {
/* force to pin into visible video ram */ unsigned fpfn, lpfn;
if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) && fpfn = min_offset >> PAGE_SHIFT;
(!max_offset || max_offset > lpfn = max_offset >> PAGE_SHIFT;
adev->mc.visible_vram_size)) {
if (WARN_ON_ONCE(min_offset >
adev->mc.visible_vram_size))
return -EINVAL;
fpfn = min_offset >> PAGE_SHIFT;
lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
} else {
fpfn = min_offset >> PAGE_SHIFT;
lpfn = max_offset >> PAGE_SHIFT;
}
if (fpfn > bo->placements[i].fpfn) if (fpfn > bo->placements[i].fpfn)
bo->placements[i].fpfn = fpfn; bo->placements[i].fpfn = fpfn;
if (!bo->placements[i].lpfn || if (!bo->placements[i].lpfn ||
...@@ -929,7 +885,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, ...@@ -929,7 +885,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
return; return;
abo = container_of(bo, struct amdgpu_bo, tbo); abo = container_of(bo, struct amdgpu_bo, tbo);
amdgpu_vm_bo_invalidate(adev, abo); amdgpu_vm_bo_invalidate(adev, abo, evict);
amdgpu_bo_kunmap(abo); amdgpu_bo_kunmap(abo);
......
...@@ -35,6 +35,7 @@ ...@@ -35,6 +35,7 @@
/* bo virtual addresses in a vm */ /* bo virtual addresses in a vm */
struct amdgpu_bo_va_mapping { struct amdgpu_bo_va_mapping {
struct amdgpu_bo_va *bo_va;
struct list_head list; struct list_head list;
struct rb_node rb; struct rb_node rb;
uint64_t start; uint64_t start;
...@@ -49,12 +50,17 @@ struct amdgpu_bo_va { ...@@ -49,12 +50,17 @@ struct amdgpu_bo_va {
struct amdgpu_vm_bo_base base; struct amdgpu_vm_bo_base base;
/* protected by bo being reserved */ /* protected by bo being reserved */
struct dma_fence *last_pt_update;
unsigned ref_count; unsigned ref_count;
/* all other members protected by the VM PD being reserved */
struct dma_fence *last_pt_update;
/* mappings for this bo_va */ /* mappings for this bo_va */
struct list_head invalids; struct list_head invalids;
struct list_head valids; struct list_head valids;
/* If the mappings are cleared or filled */
bool cleared;
}; };
struct amdgpu_bo { struct amdgpu_bo {
...@@ -189,14 +195,6 @@ int amdgpu_bo_create(struct amdgpu_device *adev, ...@@ -189,14 +195,6 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
struct reservation_object *resv, struct reservation_object *resv,
uint64_t init_value, uint64_t init_value,
struct amdgpu_bo **bo_ptr); struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
unsigned long size, int byte_align,
bool kernel, u32 domain, u64 flags,
struct sg_table *sg,
struct ttm_placement *placement,
struct reservation_object *resv,
uint64_t init_value,
struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_reserved(struct amdgpu_device *adev, int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
unsigned long size, int align, unsigned long size, int align,
u32 domain, struct amdgpu_bo **bo_ptr, u32 domain, struct amdgpu_bo **bo_ptr,
......
...@@ -87,17 +87,28 @@ static int amdgpu_pp_early_init(void *handle) ...@@ -87,17 +87,28 @@ static int amdgpu_pp_early_init(void *handle)
case CHIP_OLAND: case CHIP_OLAND:
case CHIP_HAINAN: case CHIP_HAINAN:
amd_pp->ip_funcs = &si_dpm_ip_funcs; amd_pp->ip_funcs = &si_dpm_ip_funcs;
amd_pp->pp_funcs = &si_dpm_funcs;
break; break;
#endif #endif
#ifdef CONFIG_DRM_AMDGPU_CIK #ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_BONAIRE: case CHIP_BONAIRE:
case CHIP_HAWAII: case CHIP_HAWAII:
amd_pp->ip_funcs = &ci_dpm_ip_funcs; if (amdgpu_dpm == -1) {
amd_pp->ip_funcs = &ci_dpm_ip_funcs;
amd_pp->pp_funcs = &ci_dpm_funcs;
} else {
adev->pp_enabled = true;
if (amdgpu_create_pp_handle(adev))
return -EINVAL;
amd_pp->ip_funcs = &pp_ip_funcs;
amd_pp->pp_funcs = &pp_dpm_funcs;
}
break; break;
case CHIP_KABINI: case CHIP_KABINI:
case CHIP_MULLINS: case CHIP_MULLINS:
case CHIP_KAVERI: case CHIP_KAVERI:
amd_pp->ip_funcs = &kv_dpm_ip_funcs; amd_pp->ip_funcs = &kv_dpm_ip_funcs;
amd_pp->pp_funcs = &kv_dpm_funcs;
break; break;
#endif #endif
default: default:
...@@ -128,7 +139,7 @@ static int amdgpu_pp_late_init(void *handle) ...@@ -128,7 +139,7 @@ static int amdgpu_pp_late_init(void *handle)
if (adev->pp_enabled && adev->pm.dpm_enabled) { if (adev->pp_enabled && adev->pm.dpm_enabled) {
amdgpu_pm_sysfs_init(adev); amdgpu_pm_sysfs_init(adev);
amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_COMPLETE_INIT, NULL, NULL); amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_COMPLETE_INIT, NULL, NULL);
} }
return ret; return ret;
......
...@@ -57,6 +57,40 @@ void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) ...@@ -57,6 +57,40 @@ void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
ttm_bo_kunmap(&bo->dma_buf_vmap); ttm_bo_kunmap(&bo->dma_buf_vmap);
} }
int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
unsigned asize = amdgpu_bo_size(bo);
int ret;
if (!vma->vm_file)
return -ENODEV;
if (adev == NULL)
return -ENODEV;
/* Check for valid size. */
if (asize < vma->vm_end - vma->vm_start)
return -EINVAL;
if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
return -EPERM;
}
vma->vm_pgoff += amdgpu_bo_mmap_offset(bo) >> PAGE_SHIFT;
/* prime mmap does not need to check access, so allow here */
ret = drm_vma_node_allow(&obj->vma_node, vma->vm_file->private_data);
if (ret)
return ret;
ret = ttm_bo_mmap(vma->vm_file, vma, &adev->mman.bdev);
drm_vma_node_revoke(&obj->vma_node, vma->vm_file->private_data);
return ret;
}
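
With this hook wired into the driver's gem_prime_mmap callback, a dma-buf exported from amdgpu can be mapped by calling mmap() on the dma-buf fd itself. A short userspace sketch follows; it assumes the fd came from DRM_IOCTL_PRIME_HANDLE_TO_FD and that the mapping size matches the BO size (both illustrative here).

#include <stddef.h>
#include <sys/mman.h>

/* Hedged userspace sketch, not part of the commit. */
static void *example_map_prime_fd(int dmabuf_fd, size_t size)
{
	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
			 MAP_SHARED, dmabuf_fd, 0);

	return ptr == MAP_FAILED ? NULL : ptr;
}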
struct drm_gem_object * struct drm_gem_object *
amdgpu_gem_prime_import_sg_table(struct drm_device *dev, amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct dma_buf_attachment *attach,
...@@ -136,7 +170,8 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev, ...@@ -136,7 +170,8 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
{ {
struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj); struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
return ERR_PTR(-EPERM); return ERR_PTR(-EPERM);
return drm_gem_prime_export(dev, gobj, flags); return drm_gem_prime_export(dev, gobj, flags);
......
...@@ -57,21 +57,23 @@ static int psp_sw_init(void *handle) ...@@ -57,21 +57,23 @@ static int psp_sw_init(void *handle)
psp->prep_cmd_buf = psp_v3_1_prep_cmd_buf; psp->prep_cmd_buf = psp_v3_1_prep_cmd_buf;
psp->ring_init = psp_v3_1_ring_init; psp->ring_init = psp_v3_1_ring_init;
psp->ring_create = psp_v3_1_ring_create; psp->ring_create = psp_v3_1_ring_create;
psp->ring_stop = psp_v3_1_ring_stop;
psp->ring_destroy = psp_v3_1_ring_destroy; psp->ring_destroy = psp_v3_1_ring_destroy;
psp->cmd_submit = psp_v3_1_cmd_submit; psp->cmd_submit = psp_v3_1_cmd_submit;
psp->compare_sram_data = psp_v3_1_compare_sram_data; psp->compare_sram_data = psp_v3_1_compare_sram_data;
psp->smu_reload_quirk = psp_v3_1_smu_reload_quirk; psp->smu_reload_quirk = psp_v3_1_smu_reload_quirk;
psp->mode1_reset = psp_v3_1_mode1_reset;
break; break;
case CHIP_RAVEN: case CHIP_RAVEN:
#if 0
psp->init_microcode = psp_v10_0_init_microcode; psp->init_microcode = psp_v10_0_init_microcode;
#endif
psp->prep_cmd_buf = psp_v10_0_prep_cmd_buf; psp->prep_cmd_buf = psp_v10_0_prep_cmd_buf;
psp->ring_init = psp_v10_0_ring_init; psp->ring_init = psp_v10_0_ring_init;
psp->ring_create = psp_v10_0_ring_create; psp->ring_create = psp_v10_0_ring_create;
psp->ring_stop = psp_v10_0_ring_stop;
psp->ring_destroy = psp_v10_0_ring_destroy; psp->ring_destroy = psp_v10_0_ring_destroy;
psp->cmd_submit = psp_v10_0_cmd_submit; psp->cmd_submit = psp_v10_0_cmd_submit;
psp->compare_sram_data = psp_v10_0_compare_sram_data; psp->compare_sram_data = psp_v10_0_compare_sram_data;
psp->mode1_reset = psp_v10_0_mode1_reset;
break; break;
default: default:
return -EINVAL; return -EINVAL;
...@@ -90,6 +92,12 @@ static int psp_sw_init(void *handle) ...@@ -90,6 +92,12 @@ static int psp_sw_init(void *handle)
static int psp_sw_fini(void *handle) static int psp_sw_fini(void *handle)
{ {
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
release_firmware(adev->psp.sos_fw);
adev->psp.sos_fw = NULL;
release_firmware(adev->psp.asd_fw);
adev->psp.asd_fw = NULL;
return 0; return 0;
} }
...@@ -253,15 +261,18 @@ static int psp_asd_load(struct psp_context *psp) ...@@ -253,15 +261,18 @@ static int psp_asd_load(struct psp_context *psp)
static int psp_hw_start(struct psp_context *psp) static int psp_hw_start(struct psp_context *psp)
{ {
struct amdgpu_device *adev = psp->adev;
int ret; int ret;
ret = psp_bootloader_load_sysdrv(psp); if (!amdgpu_sriov_vf(adev) || !adev->in_sriov_reset) {
if (ret) ret = psp_bootloader_load_sysdrv(psp);
return ret; if (ret)
return ret;
ret = psp_bootloader_load_sos(psp); ret = psp_bootloader_load_sos(psp);
if (ret) if (ret)
return ret; return ret;
}
ret = psp_ring_create(psp, PSP_RING_TYPE__KM); ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
if (ret) if (ret)
...@@ -453,6 +464,16 @@ static int psp_hw_fini(void *handle) ...@@ -453,6 +464,16 @@ static int psp_hw_fini(void *handle)
static int psp_suspend(void *handle) static int psp_suspend(void *handle)
{ {
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct psp_context *psp = &adev->psp;
ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
if (ret) {
DRM_ERROR("PSP ring stop failed\n");
return ret;
}
return 0; return 0;
} }
...@@ -487,6 +508,22 @@ static int psp_resume(void *handle) ...@@ -487,6 +508,22 @@ static int psp_resume(void *handle)
return ret; return ret;
} }
static bool psp_check_reset(void* handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->flags & AMD_IS_APU)
return true;
return false;
}
static int psp_reset(void* handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
return psp_mode1_reset(&adev->psp);
}
static bool psp_check_fw_loading_status(struct amdgpu_device *adev, static bool psp_check_fw_loading_status(struct amdgpu_device *adev,
enum AMDGPU_UCODE_ID ucode_type) enum AMDGPU_UCODE_ID ucode_type)
{ {
...@@ -530,8 +567,9 @@ const struct amd_ip_funcs psp_ip_funcs = { ...@@ -530,8 +567,9 @@ const struct amd_ip_funcs psp_ip_funcs = {
.suspend = psp_suspend, .suspend = psp_suspend,
.resume = psp_resume, .resume = psp_resume,
.is_idle = NULL, .is_idle = NULL,
.check_soft_reset = psp_check_reset,
.wait_for_idle = NULL, .wait_for_idle = NULL,
.soft_reset = NULL, .soft_reset = psp_reset,
.set_clockgating_state = psp_set_clockgating_state, .set_clockgating_state = psp_set_clockgating_state,
.set_powergating_state = psp_set_powergating_state, .set_powergating_state = psp_set_powergating_state,
}; };
......
...@@ -66,6 +66,8 @@ struct psp_context ...@@ -66,6 +66,8 @@ struct psp_context
struct psp_gfx_cmd_resp *cmd); struct psp_gfx_cmd_resp *cmd);
int (*ring_init)(struct psp_context *psp, enum psp_ring_type ring_type); int (*ring_init)(struct psp_context *psp, enum psp_ring_type ring_type);
int (*ring_create)(struct psp_context *psp, enum psp_ring_type ring_type); int (*ring_create)(struct psp_context *psp, enum psp_ring_type ring_type);
int (*ring_stop)(struct psp_context *psp,
enum psp_ring_type ring_type);
int (*ring_destroy)(struct psp_context *psp, int (*ring_destroy)(struct psp_context *psp,
enum psp_ring_type ring_type); enum psp_ring_type ring_type);
int (*cmd_submit)(struct psp_context *psp, struct amdgpu_firmware_info *ucode, int (*cmd_submit)(struct psp_context *psp, struct amdgpu_firmware_info *ucode,
...@@ -74,6 +76,7 @@ struct psp_context ...@@ -74,6 +76,7 @@ struct psp_context
struct amdgpu_firmware_info *ucode, struct amdgpu_firmware_info *ucode,
enum AMDGPU_UCODE_ID ucode_type); enum AMDGPU_UCODE_ID ucode_type);
bool (*smu_reload_quirk)(struct psp_context *psp); bool (*smu_reload_quirk)(struct psp_context *psp);
int (*mode1_reset)(struct psp_context *psp);
/* fence buffer */ /* fence buffer */
struct amdgpu_bo *fw_pri_bo; struct amdgpu_bo *fw_pri_bo;
...@@ -123,6 +126,7 @@ struct amdgpu_psp_funcs { ...@@ -123,6 +126,7 @@ struct amdgpu_psp_funcs {
#define psp_prep_cmd_buf(ucode, type) (psp)->prep_cmd_buf((ucode), (type)) #define psp_prep_cmd_buf(ucode, type) (psp)->prep_cmd_buf((ucode), (type))
#define psp_ring_init(psp, type) (psp)->ring_init((psp), (type)) #define psp_ring_init(psp, type) (psp)->ring_init((psp), (type))
#define psp_ring_create(psp, type) (psp)->ring_create((psp), (type)) #define psp_ring_create(psp, type) (psp)->ring_create((psp), (type))
#define psp_ring_stop(psp, type) (psp)->ring_stop((psp), (type))
#define psp_ring_destroy(psp, type) ((psp)->ring_destroy((psp), (type))) #define psp_ring_destroy(psp, type) ((psp)->ring_destroy((psp), (type)))
#define psp_cmd_submit(psp, ucode, cmd_mc, fence_mc, index) \ #define psp_cmd_submit(psp, ucode, cmd_mc, fence_mc, index) \
(psp)->cmd_submit((psp), (ucode), (cmd_mc), (fence_mc), (index)) (psp)->cmd_submit((psp), (ucode), (cmd_mc), (fence_mc), (index))
...@@ -136,6 +140,8 @@ struct amdgpu_psp_funcs { ...@@ -136,6 +140,8 @@ struct amdgpu_psp_funcs {
((psp)->bootloader_load_sos ? (psp)->bootloader_load_sos((psp)) : 0) ((psp)->bootloader_load_sos ? (psp)->bootloader_load_sos((psp)) : 0)
#define psp_smu_reload_quirk(psp) \ #define psp_smu_reload_quirk(psp) \
((psp)->smu_reload_quirk ? (psp)->smu_reload_quirk((psp)) : false) ((psp)->smu_reload_quirk ? (psp)->smu_reload_quirk((psp)) : false)
#define psp_mode1_reset(psp) \
((psp)->mode1_reset ? (psp)->mode1_reset((psp)) : false)
extern const struct amd_ip_funcs psp_ip_funcs; extern const struct amd_ip_funcs psp_ip_funcs;
......
...@@ -14,62 +14,6 @@ ...@@ -14,62 +14,6 @@
#define AMDGPU_JOB_GET_TIMELINE_NAME(job) \ #define AMDGPU_JOB_GET_TIMELINE_NAME(job) \
job->base.s_fence->finished.ops->get_timeline_name(&job->base.s_fence->finished) job->base.s_fence->finished.ops->get_timeline_name(&job->base.s_fence->finished)
TRACE_EVENT(amdgpu_ttm_tt_populate,
TP_PROTO(struct amdgpu_device *adev, uint64_t dma_address, uint64_t phys_address),
TP_ARGS(adev, dma_address, phys_address),
TP_STRUCT__entry(
__field(uint16_t, domain)
__field(uint8_t, bus)
__field(uint8_t, slot)
__field(uint8_t, func)
__field(uint64_t, dma)
__field(uint64_t, phys)
),
TP_fast_assign(
__entry->domain = pci_domain_nr(adev->pdev->bus);
__entry->bus = adev->pdev->bus->number;
__entry->slot = PCI_SLOT(adev->pdev->devfn);
__entry->func = PCI_FUNC(adev->pdev->devfn);
__entry->dma = dma_address;
__entry->phys = phys_address;
),
TP_printk("%04x:%02x:%02x.%x: 0x%llx => 0x%llx",
(unsigned)__entry->domain,
(unsigned)__entry->bus,
(unsigned)__entry->slot,
(unsigned)__entry->func,
(unsigned long long)__entry->dma,
(unsigned long long)__entry->phys)
);
TRACE_EVENT(amdgpu_ttm_tt_unpopulate,
TP_PROTO(struct amdgpu_device *adev, uint64_t dma_address, uint64_t phys_address),
TP_ARGS(adev, dma_address, phys_address),
TP_STRUCT__entry(
__field(uint16_t, domain)
__field(uint8_t, bus)
__field(uint8_t, slot)
__field(uint8_t, func)
__field(uint64_t, dma)
__field(uint64_t, phys)
),
TP_fast_assign(
__entry->domain = pci_domain_nr(adev->pdev->bus);
__entry->bus = adev->pdev->bus->number;
__entry->slot = PCI_SLOT(adev->pdev->devfn);
__entry->func = PCI_FUNC(adev->pdev->devfn);
__entry->dma = dma_address;
__entry->phys = phys_address;
),
TP_printk("%04x:%02x:%02x.%x: 0x%llx => 0x%llx",
(unsigned)__entry->domain,
(unsigned)__entry->bus,
(unsigned)__entry->slot,
(unsigned)__entry->func,
(unsigned long long)__entry->dma,
(unsigned long long)__entry->phys)
);
TRACE_EVENT(amdgpu_mm_rreg, TRACE_EVENT(amdgpu_mm_rreg,
TP_PROTO(unsigned did, uint32_t reg, uint32_t value), TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
TP_ARGS(did, reg, value), TP_ARGS(did, reg, value),
...@@ -473,5 +417,5 @@ TRACE_EVENT(amdgpu_ttm_bo_move, ...@@ -473,5 +417,5 @@ TRACE_EVENT(amdgpu_ttm_bo_move,
/* This part must be outside protection */ /* This part must be outside protection */
#undef TRACE_INCLUDE_PATH #undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH . #define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/amd/amdgpu
#include <trace/define_trace.h> #include <trace/define_trace.h>
/* Copyright Red Hat Inc 2010. /* Copyright Red Hat Inc 2010.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Author : Dave Airlie <airlied@redhat.com> * Author : Dave Airlie <airlied@redhat.com>
*/ */
#include <drm/drmP.h> #include <drm/drmP.h>
......
...@@ -24,6 +24,7 @@ ...@@ -24,6 +24,7 @@
#ifndef __AMDGPU_TTM_H__ #ifndef __AMDGPU_TTM_H__
#define __AMDGPU_TTM_H__ #define __AMDGPU_TTM_H__
#include "amdgpu.h"
#include "gpu_scheduler.h" #include "gpu_scheduler.h"
#define AMDGPU_PL_GDS (TTM_PL_PRIV + 0) #define AMDGPU_PL_GDS (TTM_PL_PRIV + 0)
...@@ -45,8 +46,7 @@ struct amdgpu_mman { ...@@ -45,8 +46,7 @@ struct amdgpu_mman {
bool initialized; bool initialized;
#if defined(CONFIG_DEBUG_FS) #if defined(CONFIG_DEBUG_FS)
struct dentry *vram; struct dentry *debugfs_entries[8];
struct dentry *gtt;
#endif #endif
/* buffer handling */ /* buffer handling */
...@@ -82,4 +82,20 @@ bool amdgpu_ttm_is_bound(struct ttm_tt *ttm); ...@@ -82,4 +82,20 @@ bool amdgpu_ttm_is_bound(struct ttm_tt *ttm);
int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem); int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem);
int amdgpu_ttm_recover_gart(struct amdgpu_device *adev); int amdgpu_ttm_recover_gart(struct amdgpu_device *adev);
int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages);
void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm);
int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
uint32_t flags);
bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm);
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
unsigned long end);
bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
int *last_invalidated);
bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm);
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
struct ttm_mem_reg *mem);
#endif #endif
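
The userptr helpers now exposed here split page handling into capture, commit and invalidate steps. A rough sketch of the intended sequencing follows, under the assumption of a simplified submission path; the real CS code is more involved and the function name is illustrative.

/* Hedged sketch, not part of the commit: how the userptr helpers are
 * meant to be sequenced around a submission. */
static int example_refresh_userptr(struct amdgpu_bo *bo, struct page **pages)
{
	int r;

	/* 1) capture the current user pages; may sleep and fault pages in */
	r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, pages);
	if (r)
		return r;

	/* 2) commit them to the TTM object, typically with the MMU
	 *    notifier write lock held so no invalidation races with us */
	amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, pages);

	/* 3) if the range is later invalidated, the notifier calls
	 *    amdgpu_ttm_tt_mark_user_pages(), forcing a fresh capture
	 *    before the next submission */
	return 0;
}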
...@@ -270,12 +270,8 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type) ...@@ -270,12 +270,8 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
else else
return AMDGPU_FW_LOAD_SMU; return AMDGPU_FW_LOAD_SMU;
case CHIP_VEGA10: case CHIP_VEGA10:
if (!load_type)
return AMDGPU_FW_LOAD_DIRECT;
else
return AMDGPU_FW_LOAD_PSP;
case CHIP_RAVEN: case CHIP_RAVEN:
if (load_type != 2) if (!load_type)
return AMDGPU_FW_LOAD_DIRECT; return AMDGPU_FW_LOAD_DIRECT;
else else
return AMDGPU_FW_LOAD_PSP; return AMDGPU_FW_LOAD_PSP;
@@ -364,8 +360,6 @@ static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode,
 int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
 {
 	struct amdgpu_bo **bo = &adev->firmware.fw_buf;
-	uint64_t fw_mc_addr;
-	void *fw_buf_ptr = NULL;
 	uint64_t fw_offset = 0;
 	int i, err;
 	struct amdgpu_firmware_info *ucode = NULL;
@@ -376,37 +370,39 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
 		return 0;
 	}
 
-	err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
-			amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
-			AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
-			NULL, NULL, 0, bo);
-	if (err) {
-		dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err);
-		goto failed;
-	}
+	if (!amdgpu_sriov_vf(adev) || !adev->in_sriov_reset) {
+		err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
+				amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
+				AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
+				NULL, NULL, 0, bo);
+		if (err) {
+			dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err);
+			goto failed;
+		}
 
-	err = amdgpu_bo_reserve(*bo, false);
-	if (err) {
-		dev_err(adev->dev, "(%d) Firmware buffer reserve failed\n", err);
-		goto failed_reserve;
-	}
+		err = amdgpu_bo_reserve(*bo, false);
+		if (err) {
+			dev_err(adev->dev, "(%d) Firmware buffer reserve failed\n", err);
+			goto failed_reserve;
+		}
 
-	err = amdgpu_bo_pin(*bo, amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
-			&fw_mc_addr);
-	if (err) {
-		dev_err(adev->dev, "(%d) Firmware buffer pin failed\n", err);
-		goto failed_pin;
-	}
+		err = amdgpu_bo_pin(*bo, amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
+				&adev->firmware.fw_buf_mc);
+		if (err) {
+			dev_err(adev->dev, "(%d) Firmware buffer pin failed\n", err);
+			goto failed_pin;
+		}
 
-	err = amdgpu_bo_kmap(*bo, &fw_buf_ptr);
-	if (err) {
-		dev_err(adev->dev, "(%d) Firmware buffer kmap failed\n", err);
-		goto failed_kmap;
-	}
+		err = amdgpu_bo_kmap(*bo, &adev->firmware.fw_buf_ptr);
+		if (err) {
+			dev_err(adev->dev, "(%d) Firmware buffer kmap failed\n", err);
+			goto failed_kmap;
+		}
 
-	amdgpu_bo_unreserve(*bo);
+		amdgpu_bo_unreserve(*bo);
+	}
 
-	memset(fw_buf_ptr, 0, adev->firmware.fw_size);
+	memset(adev->firmware.fw_buf_ptr, 0, adev->firmware.fw_size);
 
 	/*
 	 * if SMU loaded firmware, it needn't add SMC, UVD, and VCE
@@ -425,14 +421,14 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
 		ucode = &adev->firmware.ucode[i];
 		if (ucode->fw) {
 			header = (const struct common_firmware_header *)ucode->fw->data;
-			amdgpu_ucode_init_single_fw(adev, ucode, fw_mc_addr + fw_offset,
-						(void *)((uint8_t *)fw_buf_ptr + fw_offset));
+			amdgpu_ucode_init_single_fw(adev, ucode, adev->firmware.fw_buf_mc + fw_offset,
+						adev->firmware.fw_buf_ptr + fw_offset);
 			if (i == AMDGPU_UCODE_ID_CP_MEC1 &&
 			    adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
 				const struct gfx_firmware_header_v1_0 *cp_hdr;
 				cp_hdr = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
-				amdgpu_ucode_patch_jt(ucode, fw_mc_addr + fw_offset,
-						fw_buf_ptr + fw_offset);
+				amdgpu_ucode_patch_jt(ucode, adev->firmware.fw_buf_mc + fw_offset,
+						adev->firmware.fw_buf_ptr + fw_offset);
 				fw_offset += ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);
 			}
 			fw_offset += ALIGN(ucode->ucode_size, PAGE_SIZE);
...
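Editor's note: the net effect of the amdgpu_ucode_init_bo() change above is that the firmware buffer's GPU address and CPU mapping are kept in adev->firmware (fw_buf_mc, fw_buf_ptr) instead of in locals, so a repeated call during an SR-IOV reset can skip the allocate/pin/kmap step and simply refill the buffer that is already mapped. A rough sketch of the resulting flow, not the verbatim function:

	if (!amdgpu_sriov_vf(adev) || !adev->in_sriov_reset) {
		/* first-time init (or bare metal): create, reserve, pin and
		 * kmap the firmware BO, caching adev->firmware.fw_buf_mc and
		 * adev->firmware.fw_buf_ptr */
	}
	/* always executed, also on SR-IOV reset, via the cached mapping */
	memset(adev->firmware.fw_buf_ptr, 0, adev->firmware.fw_size);
	/* ... then copy each enabled ucode into the buffer at its fw_offset ... */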
@@ -269,6 +269,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 
 int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 {
+	int i;
 	kfree(adev->uvd.saved_bo);
 
 	amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);
@@ -279,6 +280,9 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 
 	amdgpu_ring_fini(&adev->uvd.ring);
 
+	for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i)
+		amdgpu_ring_fini(&adev->uvd.ring_enc[i]);
+
 	release_firmware(adev->uvd.fw);
 
 	return 0;
@@ -410,10 +414,10 @@ static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
 	uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
 	int r = 0;
 
-	mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
-	if (mapping == NULL) {
+	r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
+	if (r) {
 		DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
-		return -EINVAL;
+		return r;
 	}
 
 	if (!ctx->parser->adev->uvd.address_64_bit) {
@@ -737,10 +741,10 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
 	uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
 	int r;
 
-	mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
-	if (mapping == NULL) {
+	r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
+	if (r) {
 		DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
-		return -EINVAL;
+		return r;
 	}
 
 	start = amdgpu_bo_gpu_offset(bo);
@@ -917,10 +921,6 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
 		return -EINVAL;
 	}
 
-	r = amdgpu_cs_sysvm_access_required(parser);
-	if (r)
-		return r;
-
 	ctx.parser = parser;
 	ctx.buf_sizes = buf_sizes;
 	ctx.ib_idx = ib_idx;
...
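Editor's note: amdgpu_cs_find_mapping() now returns an error code and hands the BO and the mapping back through pointer arguments instead of returning the mapping (or NULL). The UVD and VCE parsers above all switch to the pattern sketched below; the prototype is taken from these hunks, the surrounding lines are illustrative:

	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	int r;

	r = amdgpu_cs_find_mapping(parser, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
		return r;	/* propagate the lookup error instead of a flat -EINVAL */
	}
	/* bo and mapping are valid here; the amdgpu_cs_sysvm_access_required()
	 * calls are dropped in the same hunks, presumably because the new
	 * lookup handles the required validation itself (an inference, not
	 * shown in this diff) */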
@@ -559,6 +559,7 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
 	struct amdgpu_bo_va_mapping *mapping;
 	struct amdgpu_bo *bo;
 	uint64_t addr;
+	int r;
 
 	if (index == 0xffffffff)
 		index = 0;
@@ -567,11 +568,11 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
 	       ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
 	addr += ((uint64_t)size) * ((uint64_t)index);
 
-	mapping = amdgpu_cs_find_mapping(p, addr, &bo);
-	if (mapping == NULL) {
+	r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
+	if (r) {
 		DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
 			  addr, lo, hi, size, index);
-		return -EINVAL;
+		return r;
 	}
 
 	if ((addr + (uint64_t)size) >
@@ -652,10 +653,6 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
 	p->job->vm = NULL;
 	ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
 
-	r = amdgpu_cs_sysvm_access_required(p);
-	if (r)
-		return r;
-
 	while (idx < ib->length_dw) {
 		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
 		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);
...
This diff is collapsed.
@@ -25,6 +25,7 @@
 #define __AMDGPU_VM_H__
 
 #include <linux/rbtree.h>
+#include <linux/idr.h>
 
 #include "gpu_scheduler.h"
 #include "amdgpu_sync.h"
@@ -105,17 +106,24 @@ struct amdgpu_vm_bo_base {
 
 	/* protected by spinlock */
 	struct list_head vm_status;
+
+	/* protected by the BO being reserved */
+	bool moved;
 };
 
 struct amdgpu_vm_pt {
-	struct amdgpu_bo *bo;
+	struct amdgpu_vm_bo_base base;
 	uint64_t addr;
 
 	/* array of page tables, one for each directory entry */
 	struct amdgpu_vm_pt *entries;
 	unsigned last_entry_used;
 };
 
+#define AMDGPU_VM_FAULT(pasid, addr) (((u64)(pasid) << 48) | (addr))
+#define AMDGPU_VM_FAULT_PASID(fault) ((u64)(fault) >> 48)
+#define AMDGPU_VM_FAULT_ADDR(fault) ((u64)(fault) & 0xfffffffff000ULL)
+
 struct amdgpu_vm {
 	/* tree of virtual addresses mapped */
 	struct rb_root_cached va;
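Editor's note: a recoverable page fault is packed into a single u64 so that it fits into the new per-VM faults kfifo below: the PASID occupies the top 16 bits and the page-aligned fault address the lower bits. A small worked example using only the macros above (the values are illustrative; the address must be page aligned and below 48 bits for the round trip to hold):

	u64 fault;
	unsigned int pasid = 0x1234;
	u64 addr = 0x0000123456789000ULL;	/* page-aligned GPU VA */

	fault = AMDGPU_VM_FAULT(pasid, addr);	/* 0x1234123456789000 */

	/* unpacking recovers both fields */
	WARN_ON(AMDGPU_VM_FAULT_PASID(fault) != pasid);
	WARN_ON(AMDGPU_VM_FAULT_ADDR(fault) != addr);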
@@ -123,19 +131,21 @@ struct amdgpu_vm {
 	/* protecting invalidated */
 	spinlock_t status_lock;
 
+	/* BOs who needs a validation */
+	struct list_head evicted;
+
+	/* PT BOs which relocated and their parent need an update */
+	struct list_head relocated;
+
 	/* BOs moved, but not yet updated in the PT */
 	struct list_head moved;
 
-	/* BOs cleared in the PT because of a move */
-	struct list_head cleared;
-
 	/* BO mappings freed, but not yet updated in the PT */
 	struct list_head freed;
 
 	/* contains the page directory */
 	struct amdgpu_vm_pt root;
-	struct dma_fence *last_dir_update;
-	uint64_t last_eviction_counter;
+	struct dma_fence *last_update;
 
 	/* protecting freed */
 	spinlock_t freed_lock;
@@ -143,8 +153,9 @@ struct amdgpu_vm {
 	/* Scheduler entity for page table updates */
 	struct amd_sched_entity entity;
 
-	/* client id */
+	/* client id and PASID (TODO: replace client_id with PASID) */
 	u64 client_id;
+	unsigned int pasid;
 
 	/* dedicated to vm */
 	struct amdgpu_vm_id *reserved_vmid[AMDGPU_MAX_VMHUBS];
@@ -153,6 +164,9 @@ struct amdgpu_vm {
 
 	/* Flag to indicate ATS support from PTE for GFX9 */
 	bool pte_support_ats;
+
+	/* Up to 128 pending page faults */
+	DECLARE_KFIFO(faults, u64, 128);
 };
 
 struct amdgpu_vm_id {
@@ -215,16 +229,25 @@ struct amdgpu_vm_manager {
 	 * BIT1[= 0] Compute updated by SDMA [= 1] by CPU
 	 */
 	int vm_update_mode;
+
+	/* PASID to VM mapping, will be used in interrupt context to
+	 * look up VM of a page fault
+	 */
+	struct idr pasid_idr;
+	spinlock_t pasid_lock;
 };
 
+int amdgpu_vm_alloc_pasid(unsigned int bits);
+void amdgpu_vm_free_pasid(unsigned int pasid);
 void amdgpu_vm_manager_init(struct amdgpu_device *adev);
 void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-		   int vm_context);
+		   int vm_context, unsigned int pasid);
 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
 			 struct list_head *validated,
 			 struct amdgpu_bo_list_entry *entry);
+bool amdgpu_vm_ready(struct amdgpu_vm *vm);
 int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 			      int (*callback)(void *p, struct amdgpu_bo *bo),
 			      void *param);
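Editor's note: a sketch of how the new PASID plumbing fits together, based only on the declarations above. The calling code is illustrative, not lifted from the driver: the 16-bit allocation width, the error handling, the interrupt-handler shape, and the assumption that amdgpu_vm_init() registers the VM in pasid_idr when given a non-zero PASID are all assumptions here; AMDGPU_VM_CONTEXT_GFX is the existing GFX context value.

	/* at VM creation: allocate a PASID and hand it to the VM */
	int pasid = amdgpu_vm_alloc_pasid(16);
	if (pasid < 0)
		return pasid;

	r = amdgpu_vm_init(adev, vm, AMDGPU_VM_CONTEXT_GFX, pasid);
	if (r) {
		amdgpu_vm_free_pasid(pasid);
		return r;
	}

	/* in the page-fault interrupt path: map the PASID back to its VM and
	 * queue the fault for recoverable handling later */
	spin_lock(&adev->vm_manager.pasid_lock);
	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
	spin_unlock(&adev->vm_manager.pasid_lock);
	if (vm)
		kfifo_put(&vm->faults, AMDGPU_VM_FAULT(pasid, addr));
		/* return value ignored in this sketch; a full fifo drops the fault */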
@@ -243,13 +266,13 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
 			  struct amdgpu_vm *vm,
 			  struct dma_fence **fence);
-int amdgpu_vm_clear_moved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-			  struct amdgpu_sync *sync);
+int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
+			   struct amdgpu_vm *vm);
 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 			struct amdgpu_bo_va *bo_va,
 			bool clear);
 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
-			     struct amdgpu_bo *bo);
+			     struct amdgpu_bo *bo, bool evicted);
 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
 				       struct amdgpu_bo *bo);
 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
@@ -269,6 +292,8 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
 				struct amdgpu_vm *vm,
 				uint64_t saddr, uint64_t size);
+struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
+							 uint64_t addr);
 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 		      struct amdgpu_bo_va *bo_va);
 void amdgpu_vm_set_fragment_size(struct amdgpu_device *adev,
...