Commit 5641f19b authored by Oleksandr Andrushchenko, committed by Boris Ostrovsky

drm/xen-front: Use Xen common shared buffer implementation

Use page directory based shared buffer implementation
now available as common code for Xen frontend drivers.

Remove flushing of shared buffer on page flip as this
workaround needs a proper fix.
Signed-off-by: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
Reviewed-by: Noralf Trønnes <noralf@tronnes.org>
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
parent b3383974
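
For context before the diff: the common helper this patch switches to exposes an alloc/map/unmap/free lifecycle around a grant-reference page directory. Below is a minimal, hypothetical sketch of how a frontend drives it, using only the calls and xen_front_pgdir_shbuf_cfg fields visible in this patch; the function name and parameters are illustrative, and error paths are trimmed:

#include <xen/xen-front-pgdir-shbuf.h>

/* Sketch: share num_pages pages of a buffer with the backend. */
static int example_share_buffer(struct xenbus_device *xb_dev,
				struct page **pages, int num_pages,
				bool be_alloc,
				struct xen_front_pgdir_shbuf *shbuf)
{
	struct xen_front_pgdir_shbuf_cfg buf_cfg;
	int ret;

	memset(&buf_cfg, 0, sizeof(buf_cfg));
	buf_cfg.xb_dev = xb_dev;	/* xenbus device granting access */
	buf_cfg.num_pages = num_pages;	/* buffer size in pages */
	buf_cfg.pages = pages;		/* backing pages */
	buf_cfg.pgdir = shbuf;		/* caller-owned shbuf to fill in */
	buf_cfg.be_alloc = be_alloc;	/* backend-allocated buffer mode */

	/* Grant references for the page directory (and the pages,
	 * unless the backend allocates the buffer). */
	ret = xen_front_pgdir_shbuf_alloc(&buf_cfg);
	if (ret < 0)
		return ret;

	/*
	 * The frontend passes xen_front_pgdir_shbuf_get_dir_start(shbuf)
	 * to the backend in its protocol-specific request (in this driver,
	 * XENDISPL_OP_DBUF_CREATE), then maps the buffer.
	 */
	ret = xen_front_pgdir_shbuf_map(shbuf);
	if (ret < 0) {
		xen_front_pgdir_shbuf_free(shbuf);
		return ret;
	}
	return 0;
}

Teardown mirrors dbuf_free() in the diff below: xen_front_pgdir_shbuf_unmap() followed by xen_front_pgdir_shbuf_free().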
drivers/gpu/drm/xen/Kconfig
@@ -12,6 +12,7 @@ config DRM_XEN_FRONTEND
 	select DRM_KMS_HELPER
 	select VIDEOMODE_HELPERS
 	select XEN_XENBUS_FRONTEND
+	select XEN_FRONT_PGDIR_SHBUF
 	help
 	  Choose this option if you want to enable a para-virtualized
 	  frontend DRM/KMS driver for Xen guest OSes.
drivers/gpu/drm/xen/Makefile
@@ -4,7 +4,6 @@ drm_xen_front-objs := xen_drm_front.o \
 		      xen_drm_front_kms.o \
 		      xen_drm_front_conn.o \
 		      xen_drm_front_evtchnl.o \
-		      xen_drm_front_shbuf.o \
 		      xen_drm_front_cfg.o \
 		      xen_drm_front_gem.o
drivers/gpu/drm/xen/xen_drm_front.c
@@ -19,6 +19,7 @@
 #include <xen/xen.h>
 #include <xen/xenbus.h>
 
+#include <xen/xen-front-pgdir-shbuf.h>
 #include <xen/interface/io/displif.h>
 
 #include "xen_drm_front.h"
@@ -26,28 +27,20 @@
 #include "xen_drm_front_evtchnl.h"
 #include "xen_drm_front_gem.h"
 #include "xen_drm_front_kms.h"
-#include "xen_drm_front_shbuf.h"
 
 struct xen_drm_front_dbuf {
 	struct list_head list;
 	u64 dbuf_cookie;
 	u64 fb_cookie;
-	struct xen_drm_front_shbuf *shbuf;
+
+	struct xen_front_pgdir_shbuf shbuf;
 };
 
-static int dbuf_add_to_list(struct xen_drm_front_info *front_info,
-			    struct xen_drm_front_shbuf *shbuf, u64 dbuf_cookie)
+static void dbuf_add_to_list(struct xen_drm_front_info *front_info,
+			     struct xen_drm_front_dbuf *dbuf, u64 dbuf_cookie)
 {
-	struct xen_drm_front_dbuf *dbuf;
-
-	dbuf = kzalloc(sizeof(*dbuf), GFP_KERNEL);
-	if (!dbuf)
-		return -ENOMEM;
-
 	dbuf->dbuf_cookie = dbuf_cookie;
-	dbuf->shbuf = shbuf;
 	list_add(&dbuf->list, &front_info->dbuf_list);
-	return 0;
 }
 
 static struct xen_drm_front_dbuf *dbuf_get(struct list_head *dbuf_list,
@@ -62,15 +55,6 @@ static struct xen_drm_front_dbuf *dbuf_get(struct list_head *dbuf_list,
 	return NULL;
 }
 
-static void dbuf_flush_fb(struct list_head *dbuf_list, u64 fb_cookie)
-{
-	struct xen_drm_front_dbuf *buf, *q;
-
-	list_for_each_entry_safe(buf, q, dbuf_list, list)
-		if (buf->fb_cookie == fb_cookie)
-			xen_drm_front_shbuf_flush(buf->shbuf);
-}
-
 static void dbuf_free(struct list_head *dbuf_list, u64 dbuf_cookie)
 {
 	struct xen_drm_front_dbuf *buf, *q;
@@ -78,8 +62,8 @@ static void dbuf_free(struct list_head *dbuf_list, u64 dbuf_cookie)
 	list_for_each_entry_safe(buf, q, dbuf_list, list)
 		if (buf->dbuf_cookie == dbuf_cookie) {
 			list_del(&buf->list);
-			xen_drm_front_shbuf_unmap(buf->shbuf);
-			xen_drm_front_shbuf_free(buf->shbuf);
+			xen_front_pgdir_shbuf_unmap(&buf->shbuf);
+			xen_front_pgdir_shbuf_free(&buf->shbuf);
 			kfree(buf);
 			break;
 		}
@@ -91,8 +75,8 @@ static void dbuf_free_all(struct list_head *dbuf_list)
 
 	list_for_each_entry_safe(buf, q, dbuf_list, list) {
 		list_del(&buf->list);
-		xen_drm_front_shbuf_unmap(buf->shbuf);
-		xen_drm_front_shbuf_free(buf->shbuf);
+		xen_front_pgdir_shbuf_unmap(&buf->shbuf);
+		xen_front_pgdir_shbuf_free(&buf->shbuf);
 		kfree(buf);
 	}
 }
@@ -171,9 +155,9 @@ int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
 			      u32 bpp, u64 size, struct page **pages)
 {
 	struct xen_drm_front_evtchnl *evtchnl;
-	struct xen_drm_front_shbuf *shbuf;
+	struct xen_drm_front_dbuf *dbuf;
 	struct xendispl_req *req;
-	struct xen_drm_front_shbuf_cfg buf_cfg;
+	struct xen_front_pgdir_shbuf_cfg buf_cfg;
 	unsigned long flags;
 	int ret;
 
@@ -181,28 +165,29 @@ int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
 	if (unlikely(!evtchnl))
 		return -EIO;
 
+	dbuf = kzalloc(sizeof(*dbuf), GFP_KERNEL);
+	if (!dbuf)
+		return -ENOMEM;
+
+	dbuf_add_to_list(front_info, dbuf, dbuf_cookie);
+
 	memset(&buf_cfg, 0, sizeof(buf_cfg));
 	buf_cfg.xb_dev = front_info->xb_dev;
+	buf_cfg.num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
 	buf_cfg.pages = pages;
-	buf_cfg.size = size;
+	buf_cfg.pgdir = &dbuf->shbuf;
 	buf_cfg.be_alloc = front_info->cfg.be_alloc;
 
-	shbuf = xen_drm_front_shbuf_alloc(&buf_cfg);
-	if (IS_ERR(shbuf))
-		return PTR_ERR(shbuf);
-
-	ret = dbuf_add_to_list(front_info, shbuf, dbuf_cookie);
-	if (ret < 0) {
-		xen_drm_front_shbuf_free(shbuf);
-		return ret;
-	}
+	ret = xen_front_pgdir_shbuf_alloc(&buf_cfg);
+	if (ret < 0)
+		goto fail_shbuf_alloc;
 
 	mutex_lock(&evtchnl->u.req.req_io_lock);
 
 	spin_lock_irqsave(&front_info->io_lock, flags);
 	req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_CREATE);
 	req->op.dbuf_create.gref_directory =
-			xen_drm_front_shbuf_get_dir_start(shbuf);
+			xen_front_pgdir_shbuf_get_dir_start(&dbuf->shbuf);
 	req->op.dbuf_create.buffer_sz = size;
 	req->op.dbuf_create.dbuf_cookie = dbuf_cookie;
 	req->op.dbuf_create.width = width;
@@ -221,7 +206,7 @@ int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
 	if (ret < 0)
 		goto fail;
 
-	ret = xen_drm_front_shbuf_map(shbuf);
+	ret = xen_front_pgdir_shbuf_map(&dbuf->shbuf);
 	if (ret < 0)
 		goto fail;
 
@@ -230,6 +215,7 @@ int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
 
 fail:
 	mutex_unlock(&evtchnl->u.req.req_io_lock);
+fail_shbuf_alloc:
 	dbuf_free(&front_info->dbuf_list, dbuf_cookie);
 	return ret;
 }
@@ -358,7 +344,6 @@ int xen_drm_front_page_flip(struct xen_drm_front_info *front_info,
 	if (unlikely(conn_idx >= front_info->num_evt_pairs))
 		return -EINVAL;
 
-	dbuf_flush_fb(&front_info->dbuf_list, fb_cookie);
 	evtchnl = &front_info->evt_pairs[conn_idx].req;
 
 	mutex_lock(&evtchnl->u.req.req_io_lock);
drivers/gpu/drm/xen/xen_drm_front_gem.c
@@ -22,7 +22,6 @@
 #include <xen/balloon.h>
 
 #include "xen_drm_front.h"
-#include "xen_drm_front_shbuf.h"
 
 struct xen_gem_object {
 	struct drm_gem_object base;
drivers/gpu/drm/xen/xen_drm_front_shbuf.c
(diff collapsed: file deleted)
drivers/gpu/drm/xen/xen_drm_front_shbuf.h (file deleted)
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
* Xen para-virtual DRM device
*
* Copyright (C) 2016-2018 EPAM Systems Inc.
*
* Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
*/
#ifndef __XEN_DRM_FRONT_SHBUF_H_
#define __XEN_DRM_FRONT_SHBUF_H_
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <xen/grant_table.h>
struct xen_drm_front_shbuf {
/*
* number of references granted for the backend use:
* - for allocated/imported dma-buf's this holds number of grant
* references for the page directory and pages of the buffer
* - for the buffer provided by the backend this holds number of
* grant references for the page directory as grant references for
* the buffer will be provided by the backend
*/
int num_grefs;
grant_ref_t *grefs;
unsigned char *directory;
int num_pages;
struct page **pages;
struct xenbus_device *xb_dev;
/* these are the ops used internally depending on be_alloc mode */
const struct xen_drm_front_shbuf_ops *ops;
/* Xen map handles for the buffer allocated by the backend */
grant_handle_t *backend_map_handles;
};
struct xen_drm_front_shbuf_cfg {
struct xenbus_device *xb_dev;
size_t size;
struct page **pages;
bool be_alloc;
};
struct xen_drm_front_shbuf *
xen_drm_front_shbuf_alloc(struct xen_drm_front_shbuf_cfg *cfg);
grant_ref_t xen_drm_front_shbuf_get_dir_start(struct xen_drm_front_shbuf *buf);
int xen_drm_front_shbuf_map(struct xen_drm_front_shbuf *buf);
int xen_drm_front_shbuf_unmap(struct xen_drm_front_shbuf *buf);
void xen_drm_front_shbuf_flush(struct xen_drm_front_shbuf *buf);
void xen_drm_front_shbuf_free(struct xen_drm_front_shbuf *buf);
#endif /* __XEN_DRM_FRONT_SHBUF_H_ */
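
For readers tracking the conversion, the deleted driver-private API above maps onto the common helpers roughly one-to-one. A summary as a comment block, derived from the substitutions visible in the diff:

/*
 * Old driver-private call                -> common replacement
 * xen_drm_front_shbuf_alloc(&cfg)        -> xen_front_pgdir_shbuf_alloc(&cfg)
 *                                           (fills a caller-embedded struct and
 *                                           returns an int instead of a pointer)
 * xen_drm_front_shbuf_get_dir_start(buf) -> xen_front_pgdir_shbuf_get_dir_start(buf)
 * xen_drm_front_shbuf_map(buf)           -> xen_front_pgdir_shbuf_map(buf)
 * xen_drm_front_shbuf_unmap(buf)         -> xen_front_pgdir_shbuf_unmap(buf)
 * xen_drm_front_shbuf_free(buf)          -> xen_front_pgdir_shbuf_free(buf)
 * xen_drm_front_shbuf_flush(buf)         -> dropped; the page-flip flush
 *                                           workaround is removed pending a
 *                                           proper fix
 */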