Commit 0e5493ca authored by CQ Tang, committed by Chris Wilson

drm/i915/stolen: make the object creation interface consistent

Our other backends return an actual error value upon failure. Do the
same for stolen objects, which currently just return NULL on failure.
Signed-off-by: CQ Tang <cq.tang@intel.com>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20191004170452.15410-2-matthew.auld@intel.com
parent 7d423af9
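
For context, the convention the patch moves every caller to is the kernel's usual error-pointer pattern: creation paths return ERR_PTR(-errno) instead of NULL, so callers test the result with IS_ERR() and propagate the encoded errno with PTR_ERR(). The sketch below is not part of the patch; the wrapper function is hypothetical and only illustrates the caller-side pattern the hunks below adopt.

/* Hypothetical caller, for illustration only -- not part of this commit. */
#include <linux/err.h>

static int example_alloc_from_stolen(struct drm_i915_private *i915,
				     resource_size_t size)
{
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_create_stolen(i915, size);
	if (IS_ERR(obj))	/* previously: if (!obj) */
		obj = i915_gem_object_create_internal(i915, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);	/* propagate the encoded errno */

	/* ... use obj, then drop the reference ... */
	i915_gem_object_put(obj);
	return 0;
}
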
@@ -3066,7 +3066,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
 					       base_aligned,
 					       base_aligned,
 					       size_aligned);
-	if (!obj)
+	if (IS_ERR(obj))
 		return false;
 
 	switch (plane_config->tiling) {
@@ -141,10 +141,10 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
 	/* If the FB is too big, just don't use it since fbdev is not very
 	 * important and we should probably use that space with FBC or other
 	 * features. */
-	obj = NULL;
+	obj = ERR_PTR(-ENODEV);
 	if (size * 2 < dev_priv->stolen_usable_size)
 		obj = i915_gem_object_create_stolen(dev_priv, size);
-	if (obj == NULL)
+	if (IS_ERR(obj))
 		obj = i915_gem_object_create_shmem(dev_priv, size);
 	if (IS_ERR(obj)) {
 		DRM_ERROR("failed to allocate framebuffer\n");
@@ -1291,7 +1291,7 @@ static int get_registers(struct intel_overlay *overlay, bool use_phys)
 	int err;
 
 	obj = i915_gem_object_create_stolen(i915, PAGE_SIZE);
-	if (obj == NULL)
+	if (IS_ERR(obj))
 		obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
@@ -553,10 +553,11 @@ _i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
 {
 	struct drm_i915_gem_object *obj;
 	unsigned int cache_level;
+	int err = -ENOMEM;
 
 	obj = i915_gem_object_alloc();
-	if (obj == NULL)
-		return NULL;
+	if (!obj)
+		goto err;
 
 	drm_gem_private_object_init(&dev_priv->drm, &obj->base, stolen->size);
 	i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
@@ -566,14 +567,16 @@ _i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
 	cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE;
 	i915_gem_object_set_cache_coherency(obj, cache_level);
 
-	if (i915_gem_object_pin_pages(obj))
+	err = i915_gem_object_pin_pages(obj);
+	if (err)
 		goto cleanup;
 
 	return obj;
 
 cleanup:
 	i915_gem_object_free(obj);
-	return NULL;
+err:
+	return ERR_PTR(err);
 }
 
 struct drm_i915_gem_object *
@@ -585,28 +588,32 @@ i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
 	int ret;
 
 	if (!drm_mm_initialized(&dev_priv->mm.stolen))
-		return NULL;
+		return ERR_PTR(-ENODEV);
 
 	if (size == 0)
-		return NULL;
+		return ERR_PTR(-EINVAL);
 
 	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
 	if (!stolen)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
 	if (ret) {
-		kfree(stolen);
-		return NULL;
+		obj = ERR_PTR(ret);
+		goto err_free;
 	}
 
 	obj = _i915_gem_object_create_stolen(dev_priv, stolen);
-	if (obj)
-		return obj;
+	if (IS_ERR(obj))
+		goto err_remove;
 
+	return obj;
+
+err_remove:
 	i915_gem_stolen_remove_node(dev_priv, stolen);
+err_free:
 	kfree(stolen);
-	return NULL;
+	return obj;
 }
 
 struct drm_i915_gem_object *
@@ -622,7 +629,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
 	int ret;
 
 	if (!drm_mm_initialized(&dev_priv->mm.stolen))
-		return NULL;
+		return ERR_PTR(-ENODEV);
 
 	DRM_DEBUG_DRIVER("creating preallocated stolen object: stolen_offset=%pa, gtt_offset=%pa, size=%pa\n",
 			 &stolen_offset, &gtt_offset, &size);
@@ -631,11 +638,11 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
 	if (WARN_ON(size == 0) ||
 	    WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
 	    WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
-		return NULL;
+		return ERR_PTR(-EINVAL);
 
 	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
 	if (!stolen)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	stolen->start = stolen_offset;
 	stolen->size = size;
@@ -645,15 +652,15 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
 	if (ret) {
 		DRM_DEBUG_DRIVER("failed to allocate stolen space\n");
 		kfree(stolen);
-		return NULL;
+		return ERR_PTR(ret);
 	}
 
 	obj = _i915_gem_object_create_stolen(dev_priv, stolen);
-	if (obj == NULL) {
+	if (IS_ERR(obj)) {
 		DRM_DEBUG_DRIVER("failed to allocate stolen object\n");
 		i915_gem_stolen_remove_node(dev_priv, stolen);
 		kfree(stolen);
-		return NULL;
+		return obj;
 	}
 
 	/* Some objects just need physical mem from stolen space */
@@ -706,5 +713,5 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
 	i915_gem_object_unpin_pages(obj);
 err:
 	i915_gem_object_put(obj);
-	return NULL;
+	return ERR_PTR(ret);
 }
@@ -334,7 +334,7 @@ static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
 	int ret;
 
 	obj = i915_gem_object_create_stolen(i915, size);
-	if (!obj)
+	if (IS_ERR(obj))
 		obj = i915_gem_object_create_internal(i915, size);
 	if (IS_ERR(obj)) {
 		DRM_ERROR("Failed to allocate scratch page\n");
@@ -299,8 +299,8 @@ static int vlv_rc6_init(struct intel_rc6 *rc6)
 							       pcbr_offset,
 							       I915_GTT_OFFSET_NONE,
 							       pctx_size);
-		if (!pctx)
-			return -ENOMEM;
+		if (IS_ERR(pctx))
+			return PTR_ERR(pctx);
 
 		goto out;
 	}
@@ -316,9 +316,9 @@ static int vlv_rc6_init(struct intel_rc6 *rc6)
 	 * memory, or any other relevant ranges.
 	 */
 	pctx = i915_gem_object_create_stolen(i915, pctx_size);
-	if (!pctx) {
+	if (IS_ERR(pctx)) {
 		DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
-		return -ENOMEM;
+		return PTR_ERR(pctx);
 	}
 
 	GEM_BUG_ON(range_overflows_t(u64,
@@ -1274,7 +1274,7 @@ static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size)
 	struct i915_vma *vma;
 
 	obj = i915_gem_object_create_stolen(i915, size);
-	if (!obj)
+	if (IS_ERR(obj))
 		obj = i915_gem_object_create_internal(i915, size);
 	if (IS_ERR(obj))
 		return ERR_CAST(obj);