Commit 2bfd43d8 authored by Linus Torvalds

Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "Not much happening, should have dequeued this lot earlier.

  One amdgpu, one nouveau and one exynos fix"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux:
  drm/exynos: atomic check only enabled crtc states
  drm/nouveau/bios/fan: hardcode the fan mode to linear
  drm/amdgpu: fix user fence handling
parents 24bc3ea5 1957d62c

@@ -1265,6 +1265,7 @@ struct amdgpu_cs_parser {
 
 	/* user fence */
 	struct amdgpu_user_fence uf;
+	struct amdgpu_bo_list_entry uf_entry;
 };
 
 struct amdgpu_job {
@@ -127,6 +127,37 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
 	return 0;
 }
 
+static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
+				      struct drm_amdgpu_cs_chunk_fence *fence_data)
+{
+	struct drm_gem_object *gobj;
+	uint32_t handle;
+
+	handle = fence_data->handle;
+	gobj = drm_gem_object_lookup(p->adev->ddev, p->filp,
+				     fence_data->handle);
+	if (gobj == NULL)
+		return -EINVAL;
+
+	p->uf.bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
+	p->uf.offset = fence_data->offset;
+
+	if (amdgpu_ttm_tt_has_userptr(p->uf.bo->tbo.ttm)) {
+		drm_gem_object_unreference_unlocked(gobj);
+		return -EINVAL;
+	}
+
+	p->uf_entry.robj = amdgpu_bo_ref(p->uf.bo);
+	p->uf_entry.prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
+	p->uf_entry.allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
+	p->uf_entry.priority = 0;
+	p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
+	p->uf_entry.tv.shared = true;
+
+	drm_gem_object_unreference_unlocked(gobj);
+	return 0;
+}
+
 int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 {
 	union drm_amdgpu_cs *cs = data;
@@ -207,28 +238,15 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 		case AMDGPU_CHUNK_ID_FENCE:
 			size = sizeof(struct drm_amdgpu_cs_chunk_fence);
-			if (p->chunks[i].length_dw * sizeof(uint32_t) >= size) {
-				uint32_t handle;
-				struct drm_gem_object *gobj;
-				struct drm_amdgpu_cs_chunk_fence *fence_data;
-
-				fence_data = (void *)p->chunks[i].kdata;
-				handle = fence_data->handle;
-				gobj = drm_gem_object_lookup(p->adev->ddev,
-							     p->filp, handle);
-				if (gobj == NULL) {
-					ret = -EINVAL;
-					goto free_partial_kdata;
-				}
-
-				p->uf.bo = gem_to_amdgpu_bo(gobj);
-				amdgpu_bo_ref(p->uf.bo);
-				drm_gem_object_unreference_unlocked(gobj);
-				p->uf.offset = fence_data->offset;
-			} else {
-				ret = -EINVAL;
-				goto free_partial_kdata;
-			}
+			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
+				ret = -EINVAL;
+				goto free_partial_kdata;
+			}
+
+			ret = amdgpu_cs_user_fence_chunk(p, (void *)p->chunks[i].kdata);
+			if (ret)
+				goto free_partial_kdata;
+
 			break;
 
 		case AMDGPU_CHUNK_ID_DEPENDENCIES:
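
For context, a minimal userspace-side sketch of the chunk this code parses. It is illustrative only and not part of this commit; it assumes the amdgpu UAPI structs from include/uapi/drm/amdgpu_drm.h, and "fence_bo_handle" is a hypothetical GEM handle owned by the caller.

	#include <stdint.h>
	#include <drm/amdgpu_drm.h>	/* may also be installed as <libdrm/amdgpu_drm.h> */

	/* Describe where the kernel should write back the user fence value. */
	static struct drm_amdgpu_cs_chunk
	build_fence_chunk(uint32_t fence_bo_handle,
			  struct drm_amdgpu_cs_chunk_fence *fence_data)
	{
		struct drm_amdgpu_cs_chunk chunk;

		/* GEM handle of a normal (non-userptr) BO; userptr BOs are
		 * rejected with -EINVAL by amdgpu_cs_user_fence_chunk() above. */
		fence_data->handle = fence_bo_handle;
		fence_data->offset = 0;	/* byte offset of the fence value in that BO */

		chunk.chunk_id = AMDGPU_CHUNK_ID_FENCE;
		/* length_dw * 4 must cover sizeof(*fence_data); see the size
		 * check in amdgpu_cs_parser_init() above. */
		chunk.length_dw = sizeof(*fence_data) / 4;
		chunk.chunk_data = (uintptr_t)fence_data;
		return chunk;
	}

The chunk would then be submitted through the DRM_IOCTL_AMDGPU_CS ioctl alongside the IB chunks; that plumbing is omitted here.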
@@ -391,6 +409,9 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p)
 	p->vm_bos = amdgpu_vm_get_bos(p->adev, &fpriv->vm,
 				      &p->validated);
 
+	if (p->uf.bo)
+		list_add(&p->uf_entry.tv.head, &p->validated);
+
 	if (need_mmap_lock)
 		down_read(&current->mm->mmap_sem);
@@ -488,8 +509,8 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
 	for (i = 0; i < parser->num_ibs; i++)
 		amdgpu_ib_free(parser->adev, &parser->ibs[i]);
 	kfree(parser->ibs);
-	if (parser->uf.bo)
-		amdgpu_bo_unref(&parser->uf.bo);
+	amdgpu_bo_unref(&parser->uf.bo);
+	amdgpu_bo_unref(&parser->uf_entry.robj);
 }
 
 static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
@@ -55,6 +55,9 @@ static int exynos_crtc_atomic_check(struct drm_crtc *crtc,
 {
 	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
 
+	if (!state->enable)
+		return 0;
+
 	if (exynos_crtc->ops->atomic_check)
 		return exynos_crtc->ops->atomic_check(exynos_crtc, state);
@@ -83,6 +83,7 @@ nvbios_fan_parse(struct nvkm_bios *bios, struct nvbios_therm_fan *fan)
 		fan->type = NVBIOS_THERM_FAN_UNK;
 	}
 
+	fan->fan_mode = NVBIOS_THERM_FAN_LINEAR;
 	fan->min_duty = nvbios_rd08(bios, data + 0x02);
 	fan->max_duty = nvbios_rd08(bios, data + 0x03);