Commit 8a6fb5b5 authored by Dave Airlie

Merge branch 'drm-next-4.15' of git://people.freedesktop.org/~agd5f/linux into drm-next

some more amd/ttm fixes.

* 'drm-next-4.15' of git://people.freedesktop.org/~agd5f/linux:
  drm/ttm: Downgrade pr_err to pr_debug for memory allocation failures
  drm/ttm: Always and only destroy bo->ttm_resv in ttm_bo_release_list
  drm/amd/amdgpu: Enabling ACP clock in hw_init (v2)
  drm/amdgpu/virt: don't dereference undefined 'module' struct
parents 36a5fdf7 767601d1
@@ -35,41 +35,50 @@
#include "acp_gfx_if.h"
#define ACP_TILE_ON_MASK 0x03
#define ACP_TILE_OFF_MASK 0x02
#define ACP_TILE_ON_RETAIN_REG_MASK 0x1f
#define ACP_TILE_OFF_RETAIN_REG_MASK 0x20
#define ACP_TILE_P1_MASK 0x3e
#define ACP_TILE_P2_MASK 0x3d
#define ACP_TILE_DSP0_MASK 0x3b
#define ACP_TILE_DSP1_MASK 0x37
#define ACP_TILE_DSP2_MASK 0x2f
#define ACP_DMA_REGS_END 0x146c0
#define ACP_I2S_PLAY_REGS_START 0x14840
#define ACP_I2S_PLAY_REGS_END 0x148b4
#define ACP_I2S_CAP_REGS_START 0x148b8
#define ACP_I2S_CAP_REGS_END 0x1496c
#define ACP_I2S_COMP1_CAP_REG_OFFSET 0xac
#define ACP_I2S_COMP2_CAP_REG_OFFSET 0xa8
#define ACP_I2S_COMP1_PLAY_REG_OFFSET 0x6c
#define ACP_I2S_COMP2_PLAY_REG_OFFSET 0x68
#define mmACP_PGFSM_RETAIN_REG 0x51c9
#define mmACP_PGFSM_CONFIG_REG 0x51ca
#define mmACP_PGFSM_READ_REG_0 0x51cc
#define mmACP_MEM_SHUT_DOWN_REQ_LO 0x51f8
#define mmACP_MEM_SHUT_DOWN_REQ_HI 0x51f9
#define mmACP_MEM_SHUT_DOWN_STS_LO 0x51fa
#define mmACP_MEM_SHUT_DOWN_STS_HI 0x51fb
#define ACP_TIMEOUT_LOOP 0x000000FF
#define ACP_DEVS 3
#define ACP_SRC_ID 162
#define ACP_TILE_ON_MASK 0x03
#define ACP_TILE_OFF_MASK 0x02
#define ACP_TILE_ON_RETAIN_REG_MASK 0x1f
#define ACP_TILE_OFF_RETAIN_REG_MASK 0x20
#define ACP_TILE_P1_MASK 0x3e
#define ACP_TILE_P2_MASK 0x3d
#define ACP_TILE_DSP0_MASK 0x3b
#define ACP_TILE_DSP1_MASK 0x37
#define ACP_TILE_DSP2_MASK 0x2f
#define ACP_DMA_REGS_END 0x146c0
#define ACP_I2S_PLAY_REGS_START 0x14840
#define ACP_I2S_PLAY_REGS_END 0x148b4
#define ACP_I2S_CAP_REGS_START 0x148b8
#define ACP_I2S_CAP_REGS_END 0x1496c
#define ACP_I2S_COMP1_CAP_REG_OFFSET 0xac
#define ACP_I2S_COMP2_CAP_REG_OFFSET 0xa8
#define ACP_I2S_COMP1_PLAY_REG_OFFSET 0x6c
#define ACP_I2S_COMP2_PLAY_REG_OFFSET 0x68
#define mmACP_PGFSM_RETAIN_REG 0x51c9
#define mmACP_PGFSM_CONFIG_REG 0x51ca
#define mmACP_PGFSM_READ_REG_0 0x51cc
#define mmACP_MEM_SHUT_DOWN_REQ_LO 0x51f8
#define mmACP_MEM_SHUT_DOWN_REQ_HI 0x51f9
#define mmACP_MEM_SHUT_DOWN_STS_LO 0x51fa
#define mmACP_MEM_SHUT_DOWN_STS_HI 0x51fb
#define mmACP_CONTROL 0x5131
#define mmACP_STATUS 0x5133
#define mmACP_SOFT_RESET 0x5134
#define ACP_CONTROL__ClkEn_MASK 0x1
#define ACP_SOFT_RESET__SoftResetAud_MASK 0x100
#define ACP_SOFT_RESET__SoftResetAudDone_MASK 0x1000000
#define ACP_CLOCK_EN_TIME_OUT_VALUE 0x000000FF
#define ACP_SOFT_RESET_DONE_TIME_OUT_VALUE 0x000000FF
#define ACP_TIMEOUT_LOOP 0x000000FF
#define ACP_DEVS 3
#define ACP_SRC_ID 162
enum {
ACP_TILE_P1 = 0,
@@ -260,6 +269,8 @@ static int acp_hw_init(void *handle)
{
	int r, i;
	uint64_t acp_base;
	u32 val = 0;
	u32 count = 0;
	struct device *dev;
	struct i2s_platform_data *i2s_pdata;
@@ -400,6 +411,46 @@ static int acp_hw_init(void *handle)
}
}
	/* Assert Soft reset of ACP */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
	val |= ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);

	count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
		if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
		    (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}

	/* Enable clock to ACP and wait until the clock is enabled */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
	val = val | ACP_CONTROL__ClkEn_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);

	count = ACP_CLOCK_EN_TIME_OUT_VALUE;
	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
		if (val & (u32) 0x1)
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}

	/* Deassert the SOFT RESET flags */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
	val &= ~ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);

	return 0;
}
@@ -412,6 +463,8 @@ static int acp_hw_init(void *handle)
static int acp_hw_fini(void *handle)
{
	int i, ret;
	u32 val = 0;
	u32 count = 0;
	struct device *dev;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -419,6 +472,42 @@ static int acp_hw_fini(void *handle)
	if (!adev->acp.acp_cell)
		return 0;

	/* Assert Soft reset of ACP */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
	val |= ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);

	count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
		if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
		    (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}

	/* Disable ACP clock */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
	val &= ~ACP_CONTROL__ClkEn_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);

	count = ACP_CLOCK_EN_TIME_OUT_VALUE;
	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
		if (val & (u32) 0x1)
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}

	if (adev->acp.acp_genpd) {
		for (i = 0; i < ACP_DEVS ; i++) {
			dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
......
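
Aside: both acp_hw_init() and acp_hw_fini() above repeat the same poll idiom: set a control bit, then re-read a register until a status bit appears or a retry budget runs out. A minimal sketch of that idiom in isolation follows; acp_reg_poll() is a hypothetical helper name, not part of the patch, and it reuses the cgs_read_register() accessor and 100us udelay() back-off already used in the hunks above.

/*
 * Illustrative sketch only (not part of this patch): wait for all bits in
 * 'mask' to read back as set in register 'reg', retrying up to 'retries'
 * times with a 100us pause between reads.
 */
static int acp_reg_poll(struct amdgpu_device *adev, u32 reg,
			u32 mask, u32 retries)
{
	u32 val;

	while (retries--) {
		val = cgs_read_register(adev->acp.cgs_device, reg);
		if ((val & mask) == mask)
			return 0;	/* requested status bits are set */
		udelay(100);		/* give the ACP block time to settle */
	}

	return -ETIMEDOUT;		/* status bits never appeared */
}

With such a helper, the soft-reset wait above would reduce to acp_reg_poll(adev, mmACP_SOFT_RESET, ACP_SOFT_RESET__SoftResetAudDone_MASK, ACP_SOFT_RESET_DONE_TIME_OUT_VALUE), and the clock-enable wait to a call with mask 0x1 against mmACP_STATUS.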
@@ -328,9 +328,11 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
					sizeof(amdgim_vf2pf_info));
	AMDGPU_FW_VRAM_VF2PF_READ(adev, driver_version,
				  &str);
#ifdef MODULE
	if (THIS_MODULE->version != NULL)
		strcpy(str, THIS_MODULE->version);
	else
#endif
		strcpy(str, "N/A");
	AMDGPU_FW_VRAM_VF2PF_WRITE(adev, driver_cert,
				   0);
......
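
The amdgpu_virt.c hunk above is needed because THIS_MODULE only points at a real struct module when the driver is built as a module; for built-in code it expands to a null module pointer, so the unguarded THIS_MODULE->version read was a NULL dereference. A minimal sketch of the guarded lookup, with a hypothetical helper name and a caller-supplied buffer (assumptions, not part of the patch):

#include <linux/module.h>
#include <linux/string.h>

/*
 * Sketch only: copy the driver's MODULE_VERSION string when one exists,
 * otherwise fall back to "N/A". The #ifdef MODULE guard matches the hunk
 * above and keeps the ->version dereference out of built-in kernels,
 * where THIS_MODULE is NULL.
 */
static void amdgpu_virt_driver_version(char *str)
{
#ifdef MODULE
	if (THIS_MODULE->version != NULL) {
		strcpy(str, THIS_MODULE->version);
		return;
	}
#endif
	strcpy(str, "N/A");
}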
@@ -150,8 +150,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
ttm_tt_destroy(bo->ttm);
atomic_dec(&bo->glob->bo_count);
dma_fence_put(bo->moving);
if (bo->resv == &bo->ttm_resv)
reservation_object_fini(&bo->ttm_resv);
reservation_object_fini(&bo->ttm_resv);
mutex_destroy(&bo->wu_mutex);
if (bo->destroy)
bo->destroy(bo);
@@ -402,14 +401,11 @@ static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
if (bo->resv == &bo->ttm_resv)
return 0;
reservation_object_init(&bo->ttm_resv);
BUG_ON(!reservation_object_trylock(&bo->ttm_resv));
r = reservation_object_copy_fences(&bo->ttm_resv, bo->resv);
if (r) {
if (r)
reservation_object_unlock(&bo->ttm_resv);
reservation_object_fini(&bo->ttm_resv);
}
return r;
}
@@ -457,10 +453,8 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
if (reservation_object_test_signaled_rcu(&bo->ttm_resv, true)) {
ttm_bo_del_from_lru(bo);
spin_unlock(&glob->lru_lock);
if (bo->resv != &bo->ttm_resv) {
if (bo->resv != &bo->ttm_resv)
reservation_object_unlock(&bo->ttm_resv);
reservation_object_fini(&bo->ttm_resv);
}
ttm_bo_cleanup_memtype_use(bo);
return;
@@ -560,8 +554,6 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
}
ttm_bo_del_from_lru(bo);
if (!list_empty(&bo->ddestroy) && (bo->resv != &bo->ttm_resv))
reservation_object_fini(&bo->ttm_resv);
list_del_init(&bo->ddestroy);
kref_put(&bo->list_kref, ttm_bo_ref_bug);
@@ -1210,8 +1202,8 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
lockdep_assert_held(&bo->resv->lock.base);
} else {
bo->resv = &bo->ttm_resv;
reservation_object_init(&bo->ttm_resv);
}
reservation_object_init(&bo->ttm_resv);
atomic_inc(&bo->glob->bo_count);
drm_vma_node_reset(&bo->vma_node);
bo->priority = 0;
......
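
Taken together, the ttm_bo.c hunks above simplify the lifetime of bo->ttm_resv: reservation_object_init() now runs unconditionally in ttm_bo_init_reserved(), reservation_object_fini() runs exactly once in ttm_bo_release_list(), and the conditional fini calls at the various cleanup sites go away. A toy model of that ownership rule is below; it is not TTM code, every _toy name is invented for the sketch, and the real reservation objects obviously carry locks and fences rather than a flag.

#include <stdio.h>

struct resv_toy { int initialized; };
struct bo_toy {
	struct resv_toy ttm_resv;	/* embedded, always init'd and fini'd */
	struct resv_toy *resv;		/* may point at a shared external resv */
};

static void resv_init(struct resv_toy *r) { r->initialized = 1; }
static void resv_fini(struct resv_toy *r) { r->initialized = 0; }

static void bo_init(struct bo_toy *bo, struct resv_toy *external)
{
	bo->resv = external ? external : &bo->ttm_resv;
	resv_init(&bo->ttm_resv);	/* unconditional, as in ttm_bo_init_reserved() */
}

static void bo_release(struct bo_toy *bo)
{
	resv_fini(&bo->ttm_resv);	/* unconditional, as in ttm_bo_release_list() */
}

int main(void)
{
	struct bo_toy bo;

	bo_init(&bo, NULL);
	printf("ttm_resv initialized: %d\n", bo.ttm_resv.initialized);
	bo_release(&bo);
	printf("ttm_resv initialized: %d\n", bo.ttm_resv.initialized);
	return 0;
}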
@@ -329,7 +329,7 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
GFP_KERNEL);
if (!pages_to_free) {
pr_err("Failed to allocate memory for pool free operation\n");
pr_debug("Failed to allocate memory for pool free operation\n");
return 0;
}
@@ -517,7 +517,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
if (!caching_array) {
pr_err("Unable to allocate table for new pages\n");
pr_debug("Unable to allocate table for new pages\n");
return -ENOMEM;
}
@@ -525,7 +525,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
p = alloc_pages(gfp_flags, order);
if (!p) {
pr_err("Unable to get page %u\n", i);
pr_debug("Unable to get page %u\n", i);
/* store already allocated pages in the pool after
* setting the caching state */
@@ -625,7 +625,7 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, int ttm_flags,
++pool->nrefills;
pool->npages += alloc_size;
} else {
pr_err("Failed to fill pool (%p)\n", pool);
pr_debug("Failed to fill pool (%p)\n", pool);
/* If we have any pages left put them to the pool. */
list_for_each_entry(p, &new_pages, lru) {
++cpages;
@@ -885,8 +885,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
while (npages) {
p = alloc_page(gfp_flags);
if (!p) {
pr_err("Unable to allocate page\n");
pr_debug("Unable to allocate page\n");
return -ENOMEM;
}
@@ -925,7 +924,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
/* If there is any pages in the list put them back to
* the pool.
*/
pr_err("Failed to allocate extra pages for large request\n");
pr_debug("Failed to allocate extra pages for large request\n");
ttm_put_pages(pages, count, flags, cstate);
return r;
}
......
@@ -463,7 +463,7 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
GFP_KERNEL);
if (!pages_to_free) {
pr_err("%s: Failed to allocate memory for pool free operation\n",
pr_debug("%s: Failed to allocate memory for pool free operation\n",
pool->dev_name);
return 0;
}
@@ -755,7 +755,7 @@ static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
if (!caching_array) {
pr_err("%s: Unable to allocate table for new pages\n",
pr_debug("%s: Unable to allocate table for new pages\n",
pool->dev_name);
return -ENOMEM;
}
@@ -768,8 +768,8 @@ static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
for (i = 0, cpages = 0; i < count; ++i) {
dma_p = __ttm_dma_alloc_page(pool);
if (!dma_p) {
pr_err("%s: Unable to get page %u\n",
pool->dev_name, i);
pr_debug("%s: Unable to get page %u\n",
pool->dev_name, i);
/* store already allocated pages in the pool after
* setting the caching state */
@@ -855,8 +855,8 @@ static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
struct dma_page *d_page;
unsigned cpages = 0;
pr_err("%s: Failed to fill %s pool (r:%d)!\n",
pool->dev_name, pool->name, r);
pr_debug("%s: Failed to fill %s pool (r:%d)!\n",
pool->dev_name, pool->name, r);
list_for_each_entry(d_page, &d_pages, page_list) {
cpages++;
......