Commit ce234ccc authored by Dave Airlie

Merge tag 'drm/tegra/for-4.18-rc1' of git://anongit.freedesktop.org/tegra/linux into drm-next

drm/tegra: Changes for v4.18-rc1

This set enables IOMMU support in the gr2d and gr3d drivers and adds
support for the zpos property on older Tegra generations. It also
enables scaling filters and incorporates some rework to eliminate a
private wrapper around struct drm_framebuffer.

The remainder is mostly a random assortment of fixes and cleanups, as
well as some preparatory work for destaging the userspace ABI, which
is almost ready and is targeted for v4.19-rc1.
Signed-off-by: Dave Airlie <airlied@redhat.com>

# gpg: Signature made Sat 19 May 2018 08:31:00 AEST
# gpg:                using RSA key DD23ACD77F3EB3A1
# gpg: Can't check signature: public key not found
Link: https://patchwork.freedesktop.org/patch/msgid/20180518224523.30982-1-thierry.reding@gmail.com
parents ddfd0f4b 6134534c
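Note on the zpos support mentioned in the changelog above: with this series the Tegra overlay planes expose the standard DRM "zpos" plane property, so userspace can reorder windows without driver-specific ioctls. The following is a minimal illustrative sketch (not part of this series) of how a libdrm client might look up and set that property on a plane; the plane ID is assumed to come from a prior drmModeGetPlaneResources() enumeration, and error handling is kept to a minimum.

/* Illustrative sketch only: set the "zpos" property on a DRM plane via libdrm. */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static int set_plane_zpos(int fd, uint32_t plane_id, uint64_t zpos)
{
	drmModeObjectProperties *props;
	uint32_t prop_id = 0;
	uint32_t i;
	int err = -1;

	props = drmModeObjectGetProperties(fd, plane_id, DRM_MODE_OBJECT_PLANE);
	if (!props)
		return -1;

	/* look up the property named "zpos" among the plane's properties */
	for (i = 0; i < props->count_props; i++) {
		drmModePropertyRes *prop = drmModeGetProperty(fd, props->props[i]);

		if (!prop)
			continue;

		if (strcmp(prop->name, "zpos") == 0)
			prop_id = prop->prop_id;

		drmModeFreeProperty(prop);
	}

	if (prop_id)
		err = drmModeObjectSetProperty(fd, plane_id, DRM_MODE_OBJECT_PLANE,
					       prop_id, zpos);

	drmModeFreeObjectProperties(props);
	return err;
}

With the atomic API, the same property would instead be queued on a drmModeAtomicReq via drmModeAtomicAddProperty(); the legacy setter is used here only to keep the sketch short.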
......@@ -55,7 +55,7 @@ struct tegra_dc_soc_info {
bool supports_interlacing;
bool supports_cursor;
bool supports_block_linear;
bool supports_blending;
bool has_legacy_blending;
unsigned int pitch_align;
bool has_powergate;
bool coupled_pm;
......@@ -67,6 +67,8 @@ struct tegra_dc_soc_info {
const u32 *overlay_formats;
unsigned int num_overlay_formats;
const u64 *modifiers;
bool has_win_a_without_filters;
bool has_win_c_without_vert_filter;
};
struct tegra_dc {
......@@ -92,7 +94,7 @@ struct tegra_dc {
const struct tegra_dc_soc_info *soc;
struct iommu_domain *domain;
struct iommu_group *group;
};
static inline struct tegra_dc *
......@@ -553,6 +555,9 @@ int tegra_dc_rgb_exit(struct tegra_dc *dc);
#define THREAD_NUM(x) (((x) & 0x1f) << 1)
#define THREAD_GROUP_ENABLE (1 << 0)
#define DC_WIN_H_FILTER_P(p) (0x601 + (p))
#define DC_WIN_V_FILTER_P(p) (0x619 + (p))
#define DC_WIN_CSC_YOF 0x611
#define DC_WIN_CSC_KYRGB 0x612
#define DC_WIN_CSC_KUR 0x613
......@@ -566,6 +571,8 @@ int tegra_dc_rgb_exit(struct tegra_dc *dc);
#define H_DIRECTION (1 << 0)
#define V_DIRECTION (1 << 2)
#define COLOR_EXPAND (1 << 6)
#define H_FILTER (1 << 8)
#define V_FILTER (1 << 10)
#define CSC_ENABLE (1 << 18)
#define WIN_ENABLE (1 << 30)
......
......@@ -98,6 +98,10 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
goto free;
}
err = iova_cache_get();
if (err < 0)
goto domain;
geometry = &tegra->domain->geometry;
gem_start = geometry->aperture_start;
gem_end = geometry->aperture_end - CARVEOUT_SZ;
......@@ -191,11 +195,14 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
drm_mode_config_cleanup(drm);
if (tegra->domain) {
iommu_domain_free(tegra->domain);
drm_mm_takedown(&tegra->mm);
mutex_destroy(&tegra->mm_lock);
drm_mm_takedown(&tegra->mm);
put_iova_domain(&tegra->carveout.domain);
iova_cache_put();
}
domain:
if (tegra->domain)
iommu_domain_free(tegra->domain);
free:
kfree(tegra);
return err;
......@@ -217,10 +224,11 @@ static void tegra_drm_unload(struct drm_device *drm)
return;
if (tegra->domain) {
iommu_domain_free(tegra->domain);
drm_mm_takedown(&tegra->mm);
mutex_destroy(&tegra->mm_lock);
drm_mm_takedown(&tegra->mm);
put_iova_domain(&tegra->carveout.domain);
iova_cache_put();
iommu_domain_free(tegra->domain);
}
kfree(tegra);
......@@ -300,46 +308,15 @@ static int host1x_reloc_copy_from_user(struct host1x_reloc *dest,
return 0;
}
static int host1x_waitchk_copy_from_user(struct host1x_waitchk *dest,
struct drm_tegra_waitchk __user *src,
struct drm_file *file)
{
u32 cmdbuf;
int err;
err = get_user(cmdbuf, &src->handle);
if (err < 0)
return err;
err = get_user(dest->offset, &src->offset);
if (err < 0)
return err;
err = get_user(dest->syncpt_id, &src->syncpt);
if (err < 0)
return err;
err = get_user(dest->thresh, &src->thresh);
if (err < 0)
return err;
dest->bo = host1x_bo_lookup(file, cmdbuf);
if (!dest->bo)
return -ENOENT;
return 0;
}
int tegra_drm_submit(struct tegra_drm_context *context,
struct drm_tegra_submit *args, struct drm_device *drm,
struct drm_file *file)
{
struct host1x_client *client = &context->client->base;
unsigned int num_cmdbufs = args->num_cmdbufs;
unsigned int num_relocs = args->num_relocs;
unsigned int num_waitchks = args->num_waitchks;
struct drm_tegra_cmdbuf __user *user_cmdbufs;
struct drm_tegra_reloc __user *user_relocs;
struct drm_tegra_waitchk __user *user_waitchks;
struct drm_tegra_syncpt __user *user_syncpt;
struct drm_tegra_syncpt syncpt;
struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
......@@ -351,7 +328,6 @@ int tegra_drm_submit(struct tegra_drm_context *context,
user_cmdbufs = u64_to_user_ptr(args->cmdbufs);
user_relocs = u64_to_user_ptr(args->relocs);
user_waitchks = u64_to_user_ptr(args->waitchks);
user_syncpt = u64_to_user_ptr(args->syncpts);
/* We don't yet support other than one syncpt_incr struct per submit */
......@@ -363,21 +339,20 @@ int tegra_drm_submit(struct tegra_drm_context *context,
return -EINVAL;
job = host1x_job_alloc(context->channel, args->num_cmdbufs,
args->num_relocs, args->num_waitchks);
args->num_relocs);
if (!job)
return -ENOMEM;
job->num_relocs = args->num_relocs;
job->num_waitchk = args->num_waitchks;
job->client = (u32)args->context;
job->class = context->client->base.class;
job->client = client;
job->class = client->class;
job->serialize = true;
/*
* Track referenced BOs so that they can be unreferenced after the
* submission is complete.
*/
num_refs = num_cmdbufs + num_relocs * 2 + num_waitchks;
num_refs = num_cmdbufs + num_relocs * 2;
refs = kmalloc_array(num_refs, sizeof(*refs), GFP_KERNEL);
if (!refs) {
......@@ -438,13 +413,13 @@ int tegra_drm_submit(struct tegra_drm_context *context,
struct host1x_reloc *reloc;
struct tegra_bo *obj;
err = host1x_reloc_copy_from_user(&job->relocarray[num_relocs],
err = host1x_reloc_copy_from_user(&job->relocs[num_relocs],
&user_relocs[num_relocs], drm,
file);
if (err < 0)
goto fail;
reloc = &job->relocarray[num_relocs];
reloc = &job->relocs[num_relocs];
obj = host1x_to_tegra_bo(reloc->cmdbuf.bo);
refs[num_refs++] = &obj->gem;
......@@ -468,30 +443,6 @@ int tegra_drm_submit(struct tegra_drm_context *context,
}
}
/* copy and resolve waitchks from submit */
while (num_waitchks--) {
struct host1x_waitchk *wait = &job->waitchk[num_waitchks];
struct tegra_bo *obj;
err = host1x_waitchk_copy_from_user(
wait, &user_waitchks[num_waitchks], file);
if (err < 0)
goto fail;
obj = host1x_to_tegra_bo(wait->bo);
refs[num_refs++] = &obj->gem;
/*
* The unaligned offset will cause an unaligned write during
* of the waitchks patching, corrupting the commands stream.
*/
if (wait->offset & 3 ||
wait->offset >= obj->gem.size) {
err = -EINVAL;
goto fail;
}
}
if (copy_from_user(&syncpt, user_syncpt, sizeof(syncpt))) {
err = -EFAULT;
goto fail;
......@@ -1101,6 +1052,52 @@ int tegra_drm_unregister_client(struct tegra_drm *tegra,
return 0;
}
struct iommu_group *host1x_client_iommu_attach(struct host1x_client *client,
bool shared)
{
struct drm_device *drm = dev_get_drvdata(client->parent);
struct tegra_drm *tegra = drm->dev_private;
struct iommu_group *group = NULL;
int err;
if (tegra->domain) {
group = iommu_group_get(client->dev);
if (!group) {
dev_err(client->dev, "failed to get IOMMU group\n");
return ERR_PTR(-ENODEV);
}
if (!shared || (shared && (group != tegra->group))) {
err = iommu_attach_group(tegra->domain, group);
if (err < 0) {
iommu_group_put(group);
return ERR_PTR(err);
}
if (shared && !tegra->group)
tegra->group = group;
}
}
return group;
}
void host1x_client_iommu_detach(struct host1x_client *client,
struct iommu_group *group)
{
struct drm_device *drm = dev_get_drvdata(client->parent);
struct tegra_drm *tegra = drm->dev_private;
if (group) {
if (group == tegra->group) {
iommu_detach_group(tegra->domain, group);
tegra->group = NULL;
}
iommu_group_put(group);
}
}
void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size, dma_addr_t *dma)
{
struct iova *alloc;
......
......@@ -29,16 +29,10 @@
struct reset_control;
struct tegra_fb {
struct drm_framebuffer base;
struct tegra_bo **planes;
unsigned int num_planes;
};
#ifdef CONFIG_DRM_FBDEV_EMULATION
struct tegra_fbdev {
struct drm_fb_helper base;
struct tegra_fb *fb;
struct drm_framebuffer *fb;
};
#endif
......@@ -97,6 +91,7 @@ struct tegra_drm_client {
struct host1x_client base;
struct list_head list;
unsigned int version;
const struct tegra_drm_client_ops *ops;
};
......@@ -110,6 +105,10 @@ int tegra_drm_register_client(struct tegra_drm *tegra,
struct tegra_drm_client *client);
int tegra_drm_unregister_client(struct tegra_drm *tegra,
struct tegra_drm_client *client);
struct iommu_group *host1x_client_iommu_attach(struct host1x_client *client,
bool shared);
void host1x_client_iommu_detach(struct host1x_client *client,
struct iommu_group *group);
int tegra_drm_init(struct tegra_drm *tegra, struct drm_device *drm);
int tegra_drm_exit(struct tegra_drm *tegra);
......
......@@ -14,11 +14,7 @@
#include "drm.h"
#include "gem.h"
static inline struct tegra_fb *to_tegra_fb(struct drm_framebuffer *fb)
{
return container_of(fb, struct tegra_fb, base);
}
#include <drm/drm_gem_framebuffer_helper.h>
#ifdef CONFIG_DRM_FBDEV_EMULATION
static inline struct tegra_fbdev *to_tegra_fbdev(struct drm_fb_helper *helper)
......@@ -30,19 +26,14 @@ static inline struct tegra_fbdev *to_tegra_fbdev(struct drm_fb_helper *helper)
struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
unsigned int index)
{
struct tegra_fb *fb = to_tegra_fb(framebuffer);
if (index >= framebuffer->format->num_planes)
return NULL;
return fb->planes[index];
return to_tegra_bo(drm_gem_fb_get_obj(framebuffer, index));
}
bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer)
{
struct tegra_fb *fb = to_tegra_fb(framebuffer);
struct tegra_bo *bo = tegra_fb_get_plane(framebuffer, 0);
if (fb->planes[0]->flags & TEGRA_BO_BOTTOM_UP)
if (bo->flags & TEGRA_BO_BOTTOM_UP)
return true;
return false;
......@@ -51,8 +42,7 @@ bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer)
int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer,
struct tegra_bo_tiling *tiling)
{
struct tegra_fb *fb = to_tegra_fb(framebuffer);
uint64_t modifier = fb->base.modifier;
uint64_t modifier = framebuffer->modifier;
switch (modifier) {
case DRM_FORMAT_MOD_LINEAR:
......@@ -102,46 +92,17 @@ int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer,
return 0;
}
static void tegra_fb_destroy(struct drm_framebuffer *framebuffer)
{
struct tegra_fb *fb = to_tegra_fb(framebuffer);
unsigned int i;
for (i = 0; i < fb->num_planes; i++) {
struct tegra_bo *bo = fb->planes[i];
if (bo) {
if (bo->pages)
vunmap(bo->vaddr);
drm_gem_object_put_unlocked(&bo->gem);
}
}
drm_framebuffer_cleanup(framebuffer);
kfree(fb->planes);
kfree(fb);
}
static int tegra_fb_create_handle(struct drm_framebuffer *framebuffer,
struct drm_file *file, unsigned int *handle)
{
struct tegra_fb *fb = to_tegra_fb(framebuffer);
return drm_gem_handle_create(file, &fb->planes[0]->gem, handle);
}
static const struct drm_framebuffer_funcs tegra_fb_funcs = {
.destroy = tegra_fb_destroy,
.create_handle = tegra_fb_create_handle,
.destroy = drm_gem_fb_destroy,
.create_handle = drm_gem_fb_create_handle,
};
static struct tegra_fb *tegra_fb_alloc(struct drm_device *drm,
static struct drm_framebuffer *tegra_fb_alloc(struct drm_device *drm,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct tegra_bo **planes,
unsigned int num_planes)
{
struct tegra_fb *fb;
struct drm_framebuffer *fb;
unsigned int i;
int err;
......@@ -149,24 +110,15 @@ static struct tegra_fb *tegra_fb_alloc(struct drm_device *drm,
if (!fb)
return ERR_PTR(-ENOMEM);
fb->planes = kzalloc(num_planes * sizeof(*planes), GFP_KERNEL);
if (!fb->planes) {
kfree(fb);
return ERR_PTR(-ENOMEM);
}
fb->num_planes = num_planes;
drm_helper_mode_fill_fb_struct(drm, fb, mode_cmd);
drm_helper_mode_fill_fb_struct(drm, &fb->base, mode_cmd);
for (i = 0; i < fb->format->num_planes; i++)
fb->obj[i] = &planes[i]->gem;
for (i = 0; i < fb->num_planes; i++)
fb->planes[i] = planes[i];
err = drm_framebuffer_init(drm, &fb->base, &tegra_fb_funcs);
err = drm_framebuffer_init(drm, fb, &tegra_fb_funcs);
if (err < 0) {
dev_err(drm->dev, "failed to initialize framebuffer: %d\n",
err);
kfree(fb->planes);
kfree(fb);
return ERR_PTR(err);
}
......@@ -181,7 +133,7 @@ struct drm_framebuffer *tegra_fb_create(struct drm_device *drm,
unsigned int hsub, vsub, i;
struct tegra_bo *planes[4];
struct drm_gem_object *gem;
struct tegra_fb *fb;
struct drm_framebuffer *fb;
int err;
hsub = drm_format_horz_chroma_subsampling(cmd->pixel_format);
......@@ -217,7 +169,7 @@ struct drm_framebuffer *tegra_fb_create(struct drm_device *drm,
goto unreference;
}
return &fb->base;
return fb;
unreference:
while (i--)
......@@ -298,7 +250,7 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
return PTR_ERR(fbdev->fb);
}
fb = &fbdev->fb->base;
fb = fbdev->fb;
helper->fb = fb;
helper->fbdev = info;
......@@ -398,8 +350,17 @@ static void tegra_fbdev_exit(struct tegra_fbdev *fbdev)
{
drm_fb_helper_unregister_fbi(&fbdev->base);
if (fbdev->fb)
drm_framebuffer_remove(&fbdev->fb->base);
if (fbdev->fb) {
struct tegra_bo *bo = tegra_fb_get_plane(fbdev->fb, 0);
/* Undo the special mapping we made in fbdev probe. */
if (bo && bo->pages) {
vunmap(bo->vaddr);
bo->vaddr = 0;
}
drm_framebuffer_remove(fbdev->fb);
}
drm_fb_helper_fini(&fbdev->base);
tegra_fbdev_free(fbdev);
......
......@@ -422,14 +422,13 @@ int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
return 0;
}
static int tegra_bo_fault(struct vm_fault *vmf)
static vm_fault_t tegra_bo_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *gem = vma->vm_private_data;
struct tegra_bo *bo = to_tegra_bo(gem);
struct page *page;
pgoff_t offset;
int err;
if (!bo->pages)
return VM_FAULT_SIGBUS;
......@@ -437,20 +436,7 @@ static int tegra_bo_fault(struct vm_fault *vmf)
offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
page = bo->pages[offset];
err = vm_insert_page(vma, vmf->address, page);
switch (err) {
case -EAGAIN:
case 0:
case -ERESTARTSYS:
case -EINTR:
case -EBUSY:
return VM_FAULT_NOPAGE;
case -ENOMEM:
return VM_FAULT_OOM;
}
return VM_FAULT_SIGBUS;
return vmf_insert_page(vma, vmf->address, page);
}
const struct vm_operations_struct tegra_bo_vm_ops = {
......@@ -663,6 +649,8 @@ struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
{
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
exp_info.exp_name = KBUILD_MODNAME;
exp_info.owner = drm->driver->fops->owner;
exp_info.ops = &tegra_gem_prime_dmabuf_ops;
exp_info.size = gem->size;
exp_info.flags = flags;
......
......@@ -7,16 +7,25 @@
*/
#include <linux/clk.h>
#include <linux/iommu.h>
#include <linux/of_device.h>
#include "drm.h"
#include "gem.h"
#include "gr2d.h"
struct gr2d_soc {
unsigned int version;
};
struct gr2d {
struct iommu_group *group;
struct tegra_drm_client client;
struct host1x_channel *channel;
struct clk *clk;
const struct gr2d_soc *soc;
DECLARE_BITMAP(addr_regs, GR2D_NUM_REGS);
};
......@@ -31,6 +40,7 @@ static int gr2d_init(struct host1x_client *client)
struct drm_device *dev = dev_get_drvdata(client->parent);
unsigned long flags = HOST1X_SYNCPT_HAS_BASE;
struct gr2d *gr2d = to_gr2d(drm);
int err;
gr2d->channel = host1x_channel_request(client->dev);
if (!gr2d->channel)
......@@ -38,24 +48,48 @@ static int gr2d_init(struct host1x_client *client)
client->syncpts[0] = host1x_syncpt_request(client, flags);
if (!client->syncpts[0]) {
host1x_channel_put(gr2d->channel);
return -ENOMEM;
err = -ENOMEM;
dev_err(client->dev, "failed to request syncpoint: %d\n", err);
goto put;
}
gr2d->group = host1x_client_iommu_attach(client, false);
if (IS_ERR(gr2d->group)) {
err = PTR_ERR(gr2d->group);
dev_err(client->dev, "failed to attach to domain: %d\n", err);
goto free;
}
err = tegra_drm_register_client(dev->dev_private, drm);
if (err < 0) {
dev_err(client->dev, "failed to register client: %d\n", err);
goto detach;
}
return tegra_drm_register_client(dev->dev_private, drm);
return 0;
detach:
host1x_client_iommu_detach(client, gr2d->group);
free:
host1x_syncpt_free(client->syncpts[0]);
put:
host1x_channel_put(gr2d->channel);
return err;
}
static int gr2d_exit(struct host1x_client *client)
{
struct tegra_drm_client *drm = host1x_to_drm_client(client);
struct drm_device *dev = dev_get_drvdata(client->parent);
struct tegra_drm *tegra = dev->dev_private;
struct gr2d *gr2d = to_gr2d(drm);
int err;
err = tegra_drm_unregister_client(dev->dev_private, drm);
err = tegra_drm_unregister_client(tegra, drm);
if (err < 0)
return err;
host1x_client_iommu_detach(client, gr2d->group);
host1x_syncpt_free(client->syncpts[0]);
host1x_channel_put(gr2d->channel);
......@@ -123,9 +157,17 @@ static const struct tegra_drm_client_ops gr2d_ops = {
.submit = tegra_drm_submit,
};
static const struct gr2d_soc tegra20_gr2d_soc = {
.version = 0x20,
};
static const struct gr2d_soc tegra30_gr2d_soc = {
.version = 0x30,
};
static const struct of_device_id gr2d_match[] = {
{ .compatible = "nvidia,tegra30-gr2d" },
{ .compatible = "nvidia,tegra20-gr2d" },
{ .compatible = "nvidia,tegra30-gr2d", .data = &tegra20_gr2d_soc },
{ .compatible = "nvidia,tegra20-gr2d", .data = &tegra30_gr2d_soc },
{ },
};
MODULE_DEVICE_TABLE(of, gr2d_match);
......@@ -158,6 +200,8 @@ static int gr2d_probe(struct platform_device *pdev)
if (!gr2d)
return -ENOMEM;
gr2d->soc = of_device_get_match_data(dev);
syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
if (!syncpts)
return -ENOMEM;
......@@ -182,6 +226,7 @@ static int gr2d_probe(struct platform_device *pdev)
gr2d->client.base.num_syncpts = 1;
INIT_LIST_HEAD(&gr2d->client.list);
gr2d->client.version = gr2d->soc->version;
gr2d->client.ops = &gr2d_ops;
err = host1x_client_register(&gr2d->client.base);
......
......@@ -9,7 +9,9 @@
#include <linux/clk.h>
#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
......@@ -19,7 +21,12 @@
#include "gem.h"
#include "gr3d.h"
struct gr3d_soc {
unsigned int version;
};
struct gr3d {
struct iommu_group *group;
struct tegra_drm_client client;
struct host1x_channel *channel;
struct clk *clk_secondary;
......@@ -27,6 +34,8 @@ struct gr3d {
struct reset_control *rst_secondary;
struct reset_control *rst;
const struct gr3d_soc *soc;
DECLARE_BITMAP(addr_regs, GR3D_NUM_REGS);
};
......@@ -41,6 +50,7 @@ static int gr3d_init(struct host1x_client *client)
struct drm_device *dev = dev_get_drvdata(client->parent);
unsigned long flags = HOST1X_SYNCPT_HAS_BASE;
struct gr3d *gr3d = to_gr3d(drm);
int err;
gr3d->channel = host1x_channel_request(client->dev);
if (!gr3d->channel)
......@@ -48,11 +58,33 @@ static int gr3d_init(struct host1x_client *client)
client->syncpts[0] = host1x_syncpt_request(client, flags);
if (!client->syncpts[0]) {
host1x_channel_put(gr3d->channel);
return -ENOMEM;
err = -ENOMEM;
dev_err(client->dev, "failed to request syncpoint: %d\n", err);
goto put;
}
return tegra_drm_register_client(dev->dev_private, drm);
gr3d->group = host1x_client_iommu_attach(client, false);
if (IS_ERR(gr3d->group)) {
err = PTR_ERR(gr3d->group);
dev_err(client->dev, "failed to attach to domain: %d\n", err);
goto free;
}
err = tegra_drm_register_client(dev->dev_private, drm);
if (err < 0) {
dev_err(client->dev, "failed to register client: %d\n", err);
goto detach;
}
return 0;
detach:
host1x_client_iommu_detach(client, gr3d->group);
free:
host1x_syncpt_free(client->syncpts[0]);
put:
host1x_channel_put(gr3d->channel);
return err;
}
static int gr3d_exit(struct host1x_client *client)
......@@ -66,6 +98,7 @@ static int gr3d_exit(struct host1x_client *client)
if (err < 0)
return err;
host1x_client_iommu_detach(client, gr3d->group);
host1x_syncpt_free(client->syncpts[0]);
host1x_channel_put(gr3d->channel);
......@@ -125,10 +158,22 @@ static const struct tegra_drm_client_ops gr3d_ops = {
.submit = tegra_drm_submit,
};
static const struct gr3d_soc tegra20_gr3d_soc = {
.version = 0x20,
};
static const struct gr3d_soc tegra30_gr3d_soc = {
.version = 0x30,
};
static const struct gr3d_soc tegra114_gr3d_soc = {
.version = 0x35,
};
static const struct of_device_id tegra_gr3d_match[] = {
{ .compatible = "nvidia,tegra114-gr3d" },
{ .compatible = "nvidia,tegra30-gr3d" },
{ .compatible = "nvidia,tegra20-gr3d" },
{ .compatible = "nvidia,tegra114-gr3d", .data = &tegra114_gr3d_soc },
{ .compatible = "nvidia,tegra30-gr3d", .data = &tegra30_gr3d_soc },
{ .compatible = "nvidia,tegra20-gr3d", .data = &tegra20_gr3d_soc },
{ }
};
MODULE_DEVICE_TABLE(of, tegra_gr3d_match);
......@@ -250,6 +295,8 @@ static int gr3d_probe(struct platform_device *pdev)
if (!gr3d)
return -ENOMEM;
gr3d->soc = of_device_get_match_data(&pdev->dev);
syncpts = devm_kzalloc(&pdev->dev, sizeof(*syncpts), GFP_KERNEL);
if (!syncpts)
return -ENOMEM;
......@@ -307,6 +354,7 @@ static int gr3d_probe(struct platform_device *pdev)
gr3d->client.base.num_syncpts = 1;
INIT_LIST_HEAD(&gr3d->client.list);
gr3d->client.version = gr3d->soc->version;
gr3d->client.ops = &gr3d_ops;
err = host1x_client_register(&gr3d->client.base);
......
......@@ -687,7 +687,7 @@ void tegra_display_hub_atomic_commit(struct drm_device *drm,
struct device *dev = hub->client.dev;
int err;
hub_state = tegra_display_hub_get_state(hub, state);
hub_state = to_tegra_display_hub_state(hub->base.state);
if (hub_state->clk) {
err = clk_set_rate(hub_state->clk, hub_state->rate);
......
......@@ -23,6 +23,7 @@ static void tegra_plane_destroy(struct drm_plane *plane)
static void tegra_plane_reset(struct drm_plane *plane)
{
struct tegra_plane *p = to_tegra_plane(plane);
struct tegra_plane_state *state;
if (plane->state)
......@@ -35,6 +36,8 @@ static void tegra_plane_reset(struct drm_plane *plane)
if (state) {
plane->state = &state->base;
plane->state->plane = plane;
plane->state->zpos = p->index;
plane->state->normalized_zpos = p->index;
}
}
......@@ -53,10 +56,11 @@ tegra_plane_atomic_duplicate_state(struct drm_plane *plane)
copy->tiling = state->tiling;
copy->format = state->format;
copy->swap = state->swap;
copy->bottom_up = state->bottom_up;
copy->opaque = state->opaque;
for (i = 0; i < 3; i++)
copy->dependent[i] = state->dependent[i];
for (i = 0; i < 2; i++)
copy->blending[i] = state->blending[i];
return &copy->base;
}
......@@ -267,24 +271,8 @@ static bool __drm_format_has_alpha(u32 format)
return false;
}
/*
* This is applicable to Tegra20 and Tegra30 only where the opaque formats can
* be emulated using the alpha formats and alpha blending disabled.
*/
bool tegra_plane_format_has_alpha(unsigned int format)
{
switch (format) {
case WIN_COLOR_DEPTH_B5G5R5A1:
case WIN_COLOR_DEPTH_A1B5G5R5:
case WIN_COLOR_DEPTH_R8G8B8A8:
case WIN_COLOR_DEPTH_B8G8R8A8:
return true;
}
return false;
}
int tegra_plane_format_get_alpha(unsigned int opaque, unsigned int *alpha)
static int tegra_plane_format_get_alpha(unsigned int opaque,
unsigned int *alpha)
{
if (tegra_plane_format_is_yuv(opaque, NULL)) {
*alpha = opaque;
......@@ -316,6 +304,67 @@ int tegra_plane_format_get_alpha(unsigned int opaque, unsigned int *alpha)
return -EINVAL;
}
/*
* This is applicable to Tegra20 and Tegra30 only where the opaque formats can
* be emulated using the alpha formats and alpha blending disabled.
*/
static int tegra_plane_setup_opacity(struct tegra_plane *tegra,
struct tegra_plane_state *state)
{
unsigned int format;
int err;
switch (state->format) {
case WIN_COLOR_DEPTH_B5G5R5A1:
case WIN_COLOR_DEPTH_A1B5G5R5:
case WIN_COLOR_DEPTH_R8G8B8A8:
case WIN_COLOR_DEPTH_B8G8R8A8:
state->opaque = false;
break;
default:
err = tegra_plane_format_get_alpha(state->format, &format);
if (err < 0)
return err;
state->format = format;
state->opaque = true;
break;
}
return 0;
}
static int tegra_plane_check_transparency(struct tegra_plane *tegra,
struct tegra_plane_state *state)
{
struct drm_plane_state *old, *plane_state;
struct drm_plane *plane;
old = drm_atomic_get_old_plane_state(state->base.state, &tegra->base);
/* check if zpos / transparency changed */
if (old->normalized_zpos == state->base.normalized_zpos &&
to_tegra_plane_state(old)->opaque == state->opaque)
return 0;
/* include all sibling planes into this commit */
drm_for_each_plane(plane, tegra->base.dev) {
struct tegra_plane *p = to_tegra_plane(plane);
/* skip this plane and planes on different CRTCs */
if (p == tegra || p->dc != tegra->dc)
continue;
plane_state = drm_atomic_get_plane_state(state->base.state,
plane);
if (IS_ERR(plane_state))
return PTR_ERR(plane_state);
}
return 1;
}
static unsigned int tegra_plane_get_overlap_index(struct tegra_plane *plane,
struct tegra_plane *other)
{
......@@ -336,61 +385,98 @@ static unsigned int tegra_plane_get_overlap_index(struct tegra_plane *plane,
return index;
}
void tegra_plane_check_dependent(struct tegra_plane *tegra,
static void tegra_plane_update_transparency(struct tegra_plane *tegra,
struct tegra_plane_state *state)
{
struct drm_plane_state *old, *new;
struct drm_plane_state *new;
struct drm_plane *plane;
unsigned int zpos[2];
unsigned int i;
for (i = 0; i < 2; i++)
zpos[i] = 0;
for_each_oldnew_plane_in_state(state->base.state, plane, old, new, i) {
for_each_new_plane_in_state(state->base.state, plane, new, i) {
struct tegra_plane *p = to_tegra_plane(plane);
unsigned index;
/* skip this plane and planes on different CRTCs */
if (p == tegra || new->crtc != state->base.crtc)
if (p == tegra || p->dc != tegra->dc)
continue;
index = tegra_plane_get_overlap_index(tegra, p);
state->dependent[index] = false;
if (new->fb && __drm_format_has_alpha(new->fb->format->format))
state->blending[index].alpha = true;
else
state->blending[index].alpha = false;
if (new->normalized_zpos > state->base.normalized_zpos)
state->blending[index].top = true;
else
state->blending[index].top = false;
/*
* If any of the other planes is on top of this plane and uses
* a format with an alpha component, mark this plane as being
* dependent, meaning it's alpha value will be 1 minus the sum
* of alpha components of the overlapping planes.
* Missing framebuffer means that plane is disabled, in this
* case mark B / C window as top to be able to differentiate
* windows indices order in regards to zPos for the middle
* window X / Y registers programming.
*/
if (p->index > tegra->index) {
if (__drm_format_has_alpha(new->fb->format->format))
state->dependent[index] = true;
/* keep track of the Z position */
zpos[index] = p->index;
}
if (!new->fb)
state->blending[index].top = (index == 1);
}
}
static int tegra_plane_setup_transparency(struct tegra_plane *tegra,
struct tegra_plane_state *state)
{
struct tegra_plane_state *tegra_state;
struct drm_plane_state *new;
struct drm_plane *plane;
int err;
/*
* The region where three windows overlap is the intersection of the
* two regions where two windows overlap. It contributes to the area
* if any of the windows on top of it have an alpha component.
* If planes zpos / transparency changed, sibling planes blending
* state may require adjustment and in this case they will be included
* into this atom commit, otherwise blending state is unchanged.
*/
for (i = 0; i < 2; i++)
state->dependent[2] = state->dependent[2] ||
state->dependent[i];
err = tegra_plane_check_transparency(tegra, state);
if (err <= 0)
return err;
/*
* All planes are now in the atomic state, walk them up and update
* transparency state for each plane.
*/
drm_for_each_plane(plane, tegra->base.dev) {
struct tegra_plane *p = to_tegra_plane(plane);
/* skip planes on different CRTCs */
if (p->dc != tegra->dc)
continue;
new = drm_atomic_get_new_plane_state(state->base.state, plane);
tegra_state = to_tegra_plane_state(new);
/*
* However, if any of the windows on top of this window is opaque, it
* will completely conceal this window within that area, so avoid the
* window from contributing to the area.
* There is no need to update blending state for the disabled
* plane.
*/
for (i = 0; i < 2; i++) {
if (zpos[i] > tegra->index)
state->dependent[2] = state->dependent[2] &&
state->dependent[i];
if (new->fb)
tegra_plane_update_transparency(p, tegra_state);
}
return 0;
}
int tegra_plane_setup_legacy_state(struct tegra_plane *tegra,
struct tegra_plane_state *state)
{
int err;
err = tegra_plane_setup_opacity(tegra, state);
if (err < 0)
return err;
err = tegra_plane_setup_transparency(tegra, state);
if (err < 0)
return err;
return 0;
}
......@@ -34,6 +34,11 @@ static inline struct tegra_plane *to_tegra_plane(struct drm_plane *plane)
return container_of(plane, struct tegra_plane, base);
}
struct tegra_plane_legacy_blending_state {
bool alpha;
bool top;
};
struct tegra_plane_state {
struct drm_plane_state base;
......@@ -41,9 +46,11 @@ struct tegra_plane_state {
u32 format;
u32 swap;
bool bottom_up;
/* used for legacy blending support only */
struct tegra_plane_legacy_blending_state blending[2];
bool opaque;
bool dependent[3];
};
static inline struct tegra_plane_state *
......@@ -62,9 +69,7 @@ int tegra_plane_state_add(struct tegra_plane *plane,
int tegra_plane_format(u32 fourcc, u32 *format, u32 *swap);
bool tegra_plane_format_is_yuv(unsigned int format, bool *planar);
bool tegra_plane_format_has_alpha(unsigned int format);
int tegra_plane_format_get_alpha(unsigned int opaque, unsigned int *alpha);
void tegra_plane_check_dependent(struct tegra_plane *tegra,
int tegra_plane_setup_legacy_state(struct tegra_plane *tegra,
struct tegra_plane_state *state);
#endif /* TEGRA_PLANE_H */
......@@ -25,6 +25,7 @@
struct vic_config {
const char *firmware;
unsigned int version;
};
struct vic {
......@@ -264,18 +265,21 @@ static const struct tegra_drm_client_ops vic_ops = {
static const struct vic_config vic_t124_config = {
.firmware = NVIDIA_TEGRA_124_VIC_FIRMWARE,
.version = 0x40,
};
#define NVIDIA_TEGRA_210_VIC_FIRMWARE "nvidia/tegra210/vic04_ucode.bin"
static const struct vic_config vic_t210_config = {
.firmware = NVIDIA_TEGRA_210_VIC_FIRMWARE,
.version = 0x21,
};
#define NVIDIA_TEGRA_186_VIC_FIRMWARE "nvidia/tegra186/vic04_ucode.bin"
static const struct vic_config vic_t186_config = {
.firmware = NVIDIA_TEGRA_186_VIC_FIRMWARE,
.version = 0x18,
};
static const struct of_device_id vic_match[] = {
......@@ -342,6 +346,7 @@ static int vic_probe(struct platform_device *pdev)
vic->dev = dev;
INIT_LIST_HEAD(&vic->client.list);
vic->client.version = vic->config->version;
vic->client.ops = &vic_ops;
err = host1x_client_register(&vic->client.base);
......
......@@ -51,7 +51,7 @@ static void host1x_pushbuffer_destroy(struct push_buffer *pb)
struct host1x_cdma *cdma = pb_to_cdma(pb);
struct host1x *host1x = cdma_to_host1x(cdma);
if (!pb->phys)
if (!pb->mapped)
return;
if (host1x->domain) {
......@@ -127,7 +127,7 @@ static int host1x_pushbuffer_init(struct push_buffer *pb)
iommu_free_iova:
__free_iova(&host1x->iova, alloc);
iommu_free_mem:
dma_free_wc(host1x->dev, pb->alloc_size, pb->mapped, pb->phys);
dma_free_wc(host1x->dev, size, pb->mapped, pb->phys);
return err;
}
......@@ -247,7 +247,7 @@ static void cdma_start_timer_locked(struct host1x_cdma *cdma,
static void stop_cdma_timer_locked(struct host1x_cdma *cdma)
{
cancel_delayed_work(&cdma->timeout.wq);
cdma->timeout.client = 0;
cdma->timeout.client = NULL;
}
/*
......
......@@ -44,7 +44,7 @@ struct host1x_job;
struct push_buffer {
void *mapped; /* mapped pushbuffer memory */
dma_addr_t dma; /* device address of pushbuffer */
phys_addr_t phys; /* physical address of pushbuffer */
dma_addr_t phys; /* physical address of pushbuffer */
u32 fence; /* index we've written */
u32 pos; /* index to write to */
u32 size;
......@@ -58,7 +58,7 @@ struct buffer_timeout {
u32 syncpt_val; /* syncpt value when completed */
ktime_t start_ktime; /* starting time */
/* context timeout information */
int client;
struct host1x_client *client;
};
enum cdma_event {
......
......@@ -103,7 +103,7 @@ static void show_syncpts(struct host1x *m, struct output *o)
static void show_all(struct host1x *m, struct output *o, bool show_fifo)
{
int i;
unsigned int i;
host1x_hw_show_mlocks(m, o);
show_syncpts(m, o);
......
......@@ -223,10 +223,14 @@ static int host1x_probe(struct platform_device *pdev)
struct iommu_domain_geometry *geometry;
unsigned long order;
err = iova_cache_get();
if (err < 0)
goto put_group;
host->domain = iommu_domain_alloc(&platform_bus_type);
if (!host->domain) {
err = -ENOMEM;
goto put_group;
goto put_cache;
}
err = iommu_attach_group(host->domain, host->group);
......@@ -234,6 +238,7 @@ static int host1x_probe(struct platform_device *pdev)
if (err == -ENODEV) {
iommu_domain_free(host->domain);
host->domain = NULL;
iova_cache_put();
iommu_group_put(host->group);
host->group = NULL;
goto skip_iommu;
......@@ -308,6 +313,9 @@ static int host1x_probe(struct platform_device *pdev)
fail_free_domain:
if (host->domain)
iommu_domain_free(host->domain);
put_cache:
if (host->group)
iova_cache_put();
put_group:
iommu_group_put(host->group);
......@@ -328,6 +336,7 @@ static int host1x_remove(struct platform_device *pdev)
put_iova_domain(&host->iova);
iommu_detach_group(host->domain, host->group);
iommu_domain_free(host->domain);
iova_cache_put();
iommu_group_put(host->group);
}
......
......@@ -78,7 +78,6 @@ struct host1x_syncpt_ops {
void (*load_wait_base)(struct host1x_syncpt *syncpt);
u32 (*load)(struct host1x_syncpt *syncpt);
int (*cpu_incr)(struct host1x_syncpt *syncpt);
int (*patch_wait)(struct host1x_syncpt *syncpt, void *patch_addr);
void (*assign_to_channel)(struct host1x_syncpt *syncpt,
struct host1x_channel *channel);
void (*enable_protection)(struct host1x *host);
......@@ -183,13 +182,6 @@ static inline int host1x_hw_syncpt_cpu_incr(struct host1x *host,
return host->syncpt_op->cpu_incr(sp);
}
static inline int host1x_hw_syncpt_patch_wait(struct host1x *host,
struct host1x_syncpt *sp,
void *patch_addr)
{
return host->syncpt_op->patch_wait(sp, patch_addr);
}
static inline void host1x_hw_syncpt_assign_to_channel(
struct host1x *host, struct host1x_syncpt *sp,
struct host1x_channel *ch)
......
......@@ -104,8 +104,7 @@ static int channel_submit(struct host1x_job *job)
sp = host->syncpt + job->syncpt_id;
trace_host1x_channel_submit(dev_name(ch->dev),
job->num_gathers, job->num_relocs,
job->num_waitchk, job->syncpt_id,
job->syncpt_incrs);
job->syncpt_id, job->syncpt_incrs);
/* before error checks, return current max */
prev_max = job->syncpt_end = host1x_syncpt_read_max(sp);
......@@ -165,7 +164,7 @@ static int channel_submit(struct host1x_job *job)
trace_host1x_channel_submitted(dev_name(ch->dev), prev_max, syncval);
/* schedule a submit complete interrupt */
err = host1x_intr_add_action(host, job->syncpt_id, syncval,
err = host1x_intr_add_action(host, sp, syncval,
HOST1X_INTR_ACTION_SUBMIT_COMPLETE, ch,
completed_waiter, NULL);
completed_waiter = NULL;
......
......@@ -96,16 +96,6 @@ static int syncpt_cpu_incr(struct host1x_syncpt *sp)
return 0;
}
/* remove a wait pointed to by patch_addr */
static int syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr)
{
u32 override = host1x_class_host_wait_syncpt(HOST1X_SYNCPT_RESERVED, 0);
*((u32 *)patch_addr) = override;
return 0;
}
/**
* syncpt_assign_to_channel() - Assign syncpoint to channel
* @sp: syncpoint
......@@ -156,7 +146,6 @@ static const struct host1x_syncpt_ops host1x_syncpt_ops = {
.load_wait_base = syncpt_read_wait_base,
.load = syncpt_load,
.cpu_incr = syncpt_cpu_incr,
.patch_wait = syncpt_patch_wait,
.assign_to_channel = syncpt_assign_to_channel,
.enable_protection = syncpt_enable_protection,
};
......@@ -144,7 +144,7 @@ static const action_handler action_handlers[HOST1X_INTR_ACTION_COUNT] = {
static void run_handlers(struct list_head completed[HOST1X_INTR_ACTION_COUNT])
{
struct list_head *head = completed;
int i;
unsigned int i;
for (i = 0; i < HOST1X_INTR_ACTION_COUNT; ++i, ++head) {
action_handler handler = action_handlers[i];
......@@ -211,11 +211,11 @@ static void syncpt_thresh_work(struct work_struct *work)
host1x_syncpt_load(host->syncpt + id));
}
int host1x_intr_add_action(struct host1x *host, unsigned int id, u32 thresh,
enum host1x_intr_action action, void *data,
struct host1x_waitlist *waiter, void **ref)
int host1x_intr_add_action(struct host1x *host, struct host1x_syncpt *syncpt,
u32 thresh, enum host1x_intr_action action,
void *data, struct host1x_waitlist *waiter,
void **ref)
{
struct host1x_syncpt *syncpt;
int queue_was_empty;
if (waiter == NULL) {
......@@ -234,19 +234,17 @@ int host1x_intr_add_action(struct host1x *host, unsigned int id, u32 thresh,
waiter->data = data;
waiter->count = 1;
syncpt = host->syncpt + id;
spin_lock(&syncpt->intr.lock);
queue_was_empty = list_empty(&syncpt->intr.wait_head);
if (add_waiter_to_queue(waiter, &syncpt->intr.wait_head)) {
/* added at head of list - new threshold value */
host1x_hw_intr_set_syncpt_threshold(host, id, thresh);
host1x_hw_intr_set_syncpt_threshold(host, syncpt->id, thresh);
/* added as first waiter - enable interrupt */
if (queue_was_empty)
host1x_hw_intr_enable_syncpt_intr(host, id);
host1x_hw_intr_enable_syncpt_intr(host, syncpt->id);
}
spin_unlock(&syncpt->intr.lock);
......
......@@ -22,6 +22,7 @@
#include <linux/interrupt.h>
#include <linux/workqueue.h>
struct host1x_syncpt;
struct host1x;
enum host1x_intr_action {
......@@ -75,9 +76,10 @@ struct host1x_waitlist {
*
* This is a non-blocking api.
*/
int host1x_intr_add_action(struct host1x *host, unsigned int id, u32 thresh,
enum host1x_intr_action action, void *data,
struct host1x_waitlist *waiter, void **ref);
int host1x_intr_add_action(struct host1x *host, struct host1x_syncpt *syncpt,
u32 thresh, enum host1x_intr_action action,
void *data, struct host1x_waitlist *waiter,
void **ref);
/*
* Unreference an action submitted to host1x_intr_add_action().
......
......@@ -34,8 +34,7 @@
#define HOST1X_WAIT_SYNCPT_OFFSET 0x8
struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
u32 num_cmdbufs, u32 num_relocs,
u32 num_waitchks)
u32 num_cmdbufs, u32 num_relocs)
{
struct host1x_job *job = NULL;
unsigned int num_unpins = num_cmdbufs + num_relocs;
......@@ -46,7 +45,6 @@ struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
total = sizeof(struct host1x_job) +
(u64)num_relocs * sizeof(struct host1x_reloc) +
(u64)num_unpins * sizeof(struct host1x_job_unpin_data) +
(u64)num_waitchks * sizeof(struct host1x_waitchk) +
(u64)num_cmdbufs * sizeof(struct host1x_job_gather) +
(u64)num_unpins * sizeof(dma_addr_t) +
(u64)num_unpins * sizeof(u32 *);
......@@ -62,12 +60,10 @@ struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
/* Redistribute memory to the structs */
mem += sizeof(struct host1x_job);
job->relocarray = num_relocs ? mem : NULL;
job->relocs = num_relocs ? mem : NULL;
mem += num_relocs * sizeof(struct host1x_reloc);
job->unpins = num_unpins ? mem : NULL;
mem += num_unpins * sizeof(struct host1x_job_unpin_data);
job->waitchk = num_waitchks ? mem : NULL;
mem += num_waitchks * sizeof(struct host1x_waitchk);
job->gathers = num_cmdbufs ? mem : NULL;
mem += num_cmdbufs * sizeof(struct host1x_job_gather);
job->addr_phys = num_unpins ? mem : NULL;
......@@ -100,84 +96,18 @@ void host1x_job_put(struct host1x_job *job)
EXPORT_SYMBOL(host1x_job_put);
void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
u32 words, u32 offset)
unsigned int words, unsigned int offset)
{
struct host1x_job_gather *cur_gather = &job->gathers[job->num_gathers];
struct host1x_job_gather *gather = &job->gathers[job->num_gathers];
gather->words = words;
gather->bo = bo;
gather->offset = offset;
cur_gather->words = words;
cur_gather->bo = bo;
cur_gather->offset = offset;
job->num_gathers++;
}
EXPORT_SYMBOL(host1x_job_add_gather);
/*
* NULL an already satisfied WAIT_SYNCPT host method, by patching its
* args in the command stream. The method data is changed to reference
* a reserved (never given out or incr) HOST1X_SYNCPT_RESERVED syncpt
* with a matching threshold value of 0, so is guaranteed to be popped
* by the host HW.
*/
static void host1x_syncpt_patch_offset(struct host1x_syncpt *sp,
struct host1x_bo *h, u32 offset)
{
void *patch_addr = NULL;
/* patch the wait */
patch_addr = host1x_bo_kmap(h, offset >> PAGE_SHIFT);
if (patch_addr) {
host1x_syncpt_patch_wait(sp,
patch_addr + (offset & ~PAGE_MASK));
host1x_bo_kunmap(h, offset >> PAGE_SHIFT, patch_addr);
} else
pr_err("Could not map cmdbuf for wait check\n");
}
/*
* Check driver supplied waitchk structs for syncpt thresholds
* that have already been satisfied and NULL the comparison (to
* avoid a wrap condition in the HW).
*/
static int do_waitchks(struct host1x_job *job, struct host1x *host,
struct host1x_job_gather *g)
{
struct host1x_bo *patch = g->bo;
int i;
/* compare syncpt vs wait threshold */
for (i = 0; i < job->num_waitchk; i++) {
struct host1x_waitchk *wait = &job->waitchk[i];
struct host1x_syncpt *sp =
host1x_syncpt_get(host, wait->syncpt_id);
/* validate syncpt id */
if (wait->syncpt_id > host1x_syncpt_nb_pts(host))
continue;
/* skip all other gathers */
if (patch != wait->bo)
continue;
trace_host1x_syncpt_wait_check(wait->bo, wait->offset,
wait->syncpt_id, wait->thresh,
host1x_syncpt_read_min(sp));
if (host1x_syncpt_is_expired(sp, wait->thresh)) {
dev_dbg(host->dev,
"drop WAIT id %u (%s) thresh 0x%x, min 0x%x\n",
wait->syncpt_id, sp->name, wait->thresh,
host1x_syncpt_read_min(sp));
host1x_syncpt_patch_offset(sp, patch,
g->offset + wait->offset);
}
wait->bo = NULL;
}
return 0;
}
static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
{
unsigned int i;
......@@ -186,7 +116,7 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
job->num_unpins = 0;
for (i = 0; i < job->num_relocs; i++) {
struct host1x_reloc *reloc = &job->relocarray[i];
struct host1x_reloc *reloc = &job->relocs[i];
struct sg_table *sgt;
dma_addr_t phys_addr;
......@@ -267,14 +197,14 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
static int do_relocs(struct host1x_job *job, struct host1x_job_gather *g)
{
int i = 0;
u32 last_page = ~0;
void *cmdbuf_page_addr = NULL;
struct host1x_bo *cmdbuf = g->bo;
unsigned int i;
/* pin & patch the relocs for one gather */
for (i = 0; i < job->num_relocs; i++) {
struct host1x_reloc *reloc = &job->relocarray[i];
struct host1x_reloc *reloc = &job->relocs[i];
u32 reloc_addr = (job->reloc_addr_phys[i] +
reloc->target.offset) >> reloc->shift;
u32 *target;
......@@ -331,17 +261,6 @@ static bool check_reloc(struct host1x_reloc *reloc, struct host1x_bo *cmdbuf,
return true;
}
static bool check_wait(struct host1x_waitchk *wait, struct host1x_bo *cmdbuf,
unsigned int offset)
{
offset *= sizeof(u32);
if (wait->bo != cmdbuf || wait->offset != offset)
return false;
return true;
}
struct host1x_firewall {
struct host1x_job *job;
struct device *dev;
......@@ -349,9 +268,6 @@ struct host1x_firewall {
unsigned int num_relocs;
struct host1x_reloc *reloc;
unsigned int num_waitchks;
struct host1x_waitchk *waitchk;
struct host1x_bo *cmdbuf;
unsigned int offset;
......@@ -378,20 +294,6 @@ static int check_register(struct host1x_firewall *fw, unsigned long offset)
fw->reloc++;
}
if (offset == HOST1X_WAIT_SYNCPT_OFFSET) {
if (fw->class != HOST1X_CLASS_HOST1X)
return -EINVAL;
if (!fw->num_waitchks)
return -EINVAL;
if (!check_wait(fw->waitchk, fw->cmdbuf, fw->offset))
return -EINVAL;
fw->num_waitchks--;
fw->waitchk++;
}
return 0;
}
......@@ -550,14 +452,12 @@ static inline int copy_gathers(struct host1x_job *job, struct device *dev)
struct host1x_firewall fw;
size_t size = 0;
size_t offset = 0;
int i;
unsigned int i;
fw.job = job;
fw.dev = dev;
fw.reloc = job->relocarray;
fw.reloc = job->relocs;
fw.num_relocs = job->num_relocs;
fw.waitchk = job->waitchk;
fw.num_waitchks = job->num_waitchk;
fw.class = job->class;
for (i = 0; i < job->num_gathers; i++) {
......@@ -604,8 +504,8 @@ static inline int copy_gathers(struct host1x_job *job, struct device *dev)
offset += g->words * sizeof(u32);
}
/* No relocs and waitchks should remain at this point */
if (fw.num_relocs || fw.num_waitchks)
/* No relocs should remain at this point */
if (fw.num_relocs)
return -EINVAL;
return 0;
......@@ -616,19 +516,6 @@ int host1x_job_pin(struct host1x_job *job, struct device *dev)
int err;
unsigned int i, j;
struct host1x *host = dev_get_drvdata(dev->parent);
DECLARE_BITMAP(waitchk_mask, host1x_syncpt_nb_pts(host));
bitmap_zero(waitchk_mask, host1x_syncpt_nb_pts(host));
for (i = 0; i < job->num_waitchk; i++) {
u32 syncpt_id = job->waitchk[i].syncpt_id;
if (syncpt_id < host1x_syncpt_nb_pts(host))
set_bit(syncpt_id, waitchk_mask);
}
/* get current syncpt values for waitchk */
for_each_set_bit(i, waitchk_mask, host1x_syncpt_nb_pts(host))
host1x_syncpt_load(host->syncpt + i);
/* pin memory */
err = pin_job(host, job);
......@@ -663,10 +550,6 @@ int host1x_job_pin(struct host1x_job *job, struct device *dev)
err = do_relocs(job, g);
if (err)
break;
err = do_waitchks(job, host, g);
if (err)
break;
}
out:
......
......@@ -20,10 +20,10 @@
#define __HOST1X_JOB_H
struct host1x_job_gather {
u32 words;
unsigned int words;
dma_addr_t base;
struct host1x_bo *bo;
u32 offset;
unsigned int offset;
bool handled;
};
......
......@@ -57,8 +57,8 @@ static struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
struct host1x_client *client,
unsigned long flags)
{
int i;
struct host1x_syncpt *sp = host->syncpt;
unsigned int i;
char *name;
mutex_lock(&host->syncpt_mutex);
......@@ -255,7 +255,7 @@ int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
}
/* schedule a wakeup when the syncpoint value is reached */
err = host1x_intr_add_action(sp->host, sp->id, thresh,
err = host1x_intr_add_action(sp->host, sp, thresh,
HOST1X_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
&wq, waiter, &ref);
if (err)
......@@ -373,12 +373,6 @@ bool host1x_syncpt_is_expired(struct host1x_syncpt *sp, u32 thresh)
return (s32)(current_val - thresh) >= 0;
}
/* remove a wait pointed to by patch_addr */
int host1x_syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr)
{
return host1x_hw_syncpt_patch_wait(sp->host, sp, patch_addr);
}
int host1x_syncpt_init(struct host1x *host)
{
struct host1x_syncpt_base *bases;
......
......@@ -124,7 +124,4 @@ static inline int host1x_syncpt_is_valid(struct host1x_syncpt *sp)
return sp->id < host1x_syncpt_nb_pts(sp->host);
}
/* Patch a wait by replacing it with a wait for syncpt 0 value 0 */
int host1x_syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr);
#endif
......@@ -192,13 +192,6 @@ struct host1x_reloc {
unsigned long shift;
};
struct host1x_waitchk {
struct host1x_bo *bo;
u32 offset;
u32 syncpt_id;
u32 thresh;
};
struct host1x_job {
/* When refcount goes to zero, job can be freed */
struct kref ref;
......@@ -209,19 +202,15 @@ struct host1x_job {
/* Channel where job is submitted to */
struct host1x_channel *channel;
u32 client;
/* client where the job originated */
struct host1x_client *client;
/* Gathers and their memory */
struct host1x_job_gather *gathers;
unsigned int num_gathers;
/* Wait checks to be processed at submit time */
struct host1x_waitchk *waitchk;
unsigned int num_waitchk;
u32 waitchk_mask;
/* Array of handles to be pinned & unpinned */
struct host1x_reloc *relocarray;
struct host1x_reloc *relocs;
unsigned int num_relocs;
struct host1x_job_unpin_data *unpins;
unsigned int num_unpins;
......@@ -261,10 +250,9 @@ struct host1x_job {
};
struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
u32 num_cmdbufs, u32 num_relocs,
u32 num_waitchks);
void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *mem_id,
u32 words, u32 offset);
u32 num_cmdbufs, u32 num_relocs);
void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
unsigned int words, unsigned int offset);
struct host1x_job *host1x_job_get(struct host1x_job *job);
void host1x_job_put(struct host1x_job *job);
int host1x_job_pin(struct host1x_job *job, struct device *dev);
......
......@@ -115,16 +115,15 @@ TRACE_EVENT(host1x_cdma_push_gather,
);
TRACE_EVENT(host1x_channel_submit,
TP_PROTO(const char *name, u32 cmdbufs, u32 relocs, u32 waitchks,
u32 syncpt_id, u32 syncpt_incrs),
TP_PROTO(const char *name, u32 cmdbufs, u32 relocs, u32 syncpt_id,
u32 syncpt_incrs),
TP_ARGS(name, cmdbufs, relocs, waitchks, syncpt_id, syncpt_incrs),
TP_ARGS(name, cmdbufs, relocs, syncpt_id, syncpt_incrs),
TP_STRUCT__entry(
__field(const char *, name)
__field(u32, cmdbufs)
__field(u32, relocs)
__field(u32, waitchks)
__field(u32, syncpt_id)
__field(u32, syncpt_incrs)
),
......@@ -133,14 +132,13 @@ TRACE_EVENT(host1x_channel_submit,
__entry->name = name;
__entry->cmdbufs = cmdbufs;
__entry->relocs = relocs;
__entry->waitchks = waitchks;
__entry->syncpt_id = syncpt_id;
__entry->syncpt_incrs = syncpt_incrs;
),
TP_printk("name=%s, cmdbufs=%u, relocs=%u, waitchks=%d,"
"syncpt_id=%u, syncpt_incrs=%u",
__entry->name, __entry->cmdbufs, __entry->relocs, __entry->waitchks,
TP_printk("name=%s, cmdbufs=%u, relocs=%u, syncpt_id=%u, "
"syncpt_incrs=%u",
__entry->name, __entry->cmdbufs, __entry->relocs,
__entry->syncpt_id, __entry->syncpt_incrs)
);
......