Commit cb2ce93e authored by Chris Wilson

drm/i915/gem: Differentiate oom failures from invalid map types

After a cursory check on the parameters to i915_gem_object_pin_map(),
where we return a precise error, if the backend rejects the mapping we
always return ERR_PTR(-ENOMEM). Let us also return a more precise error
here, so that we can differentiate between running out of memory and
programming errors (or situations where we may be trying different paths
and looking for the error from an unsupported map type).
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20201127195334.13134-1-chris@chris-wilson.co.uk
parent d2cf0125
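
For context, a minimal caller-side sketch of what the more precise errors enable. The wrapper function below is hypothetical and not part of this patch; i915_gem_object_pin_map(), IS_ERR() and PTR_ERR() are the existing interfaces. A caller probing for a write-combining map can now fall back on -ENODEV while still treating -ENOMEM as a hard failure.

/*
 * Hypothetical caller sketch (not from this patch): with ERR_PTR-encoded
 * returns, an unsupported map type (-ENODEV) can be told apart from an
 * allocation failure (-ENOMEM).
 */
static void *pin_map_prefer_wc(struct drm_i915_gem_object *obj)
{
	void *vaddr;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(vaddr) && PTR_ERR(vaddr) == -ENODEV)
		/* Backend cannot do WC: try a write-back mapping instead. */
		vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);

	return vaddr; /* valid pointer or ERR_PTR(), e.g. -ENOMEM */
}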
@@ -281,7 +281,7 @@ static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj,
 		/* Too big for stack -- allocate temporary array instead */
 		pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
 		if (!pages)
-			return NULL;
+			return ERR_PTR(-ENOMEM);
 	}
 	i = 0;
@@ -305,13 +305,13 @@ static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
 	void *vaddr;
 	if (type != I915_MAP_WC)
-		return NULL;
+		return ERR_PTR(-ENODEV);
 	if (n_pfn > ARRAY_SIZE(stack)) {
 		/* Too big for stack -- allocate temporary array instead */
 		pfns = kvmalloc_array(n_pfn, sizeof(*pfns), GFP_KERNEL);
 		if (!pfns)
-			return NULL;
+			return ERR_PTR(-ENOMEM);
 	}
 	i = 0;
@@ -349,8 +349,10 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 		GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
 		err = ____i915_gem_object_get_pages(obj);
-		if (err)
-			goto err_unlock;
+		if (err) {
+			ptr = ERR_PTR(err);
+			goto out_unlock;
+		}
 		smp_mb__before_atomic();
 	}
@@ -362,7 +364,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
 	if (ptr && has_type != type) {
 		if (pinned) {
-			err = -EBUSY;
+			ptr = ERR_PTR(-EBUSY);
 			goto err_unpin;
 		}
@@ -374,15 +376,13 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 	if (!ptr) {
 		if (GEM_WARN_ON(type == I915_MAP_WC &&
 				!static_cpu_has(X86_FEATURE_PAT)))
-			ptr = NULL;
+			ptr = ERR_PTR(-ENODEV);
 		else if (i915_gem_object_has_struct_page(obj))
 			ptr = i915_gem_object_map_page(obj, type);
 		else
 			ptr = i915_gem_object_map_pfn(obj, type);
-		if (!ptr) {
-			err = -ENOMEM;
+		if (IS_ERR(ptr))
 			goto err_unpin;
-		}
 		obj->mm.mapping = page_pack_bits(ptr, type);
 	}
@@ -393,8 +393,6 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 err_unpin:
 	atomic_dec(&obj->mm.pages_pin_count);
-err_unlock:
-	ptr = ERR_PTR(err);
 	goto out_unlock;
 }
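For reference, the whole patch leans on the kernel's ERR_PTR convention from <linux/err.h>: a small negative errno is encoded into an otherwise invalid pointer value, IS_ERR() detects it and PTR_ERR() recovers the errno. A minimal illustrative sketch of the return shape the patched backends now follow; the function below is made up for illustration and does not exist in i915.

#include <linux/err.h>
#include <linux/errno.h>

/* Illustrative only: the return shape the patched backends now follow. */
static void *example_map_backend(void *buf, bool type_supported)
{
	if (!type_supported)
		return ERR_PTR(-ENODEV);	/* invalid/unsupported map type */
	if (!buf)
		return ERR_PTR(-ENOMEM);	/* out of memory */
	return buf;				/* success: a real kernel pointer */
}

With the backends returning ERR_PTR() values instead of NULL, i915_gem_object_pin_map() no longer has to collapse every backend failure into -ENOMEM; it simply forwards IS_ERR() pointers to its callers.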