Commit f43e47c0 authored by Ben Skeggs's avatar Ben Skeggs

drm/nouveau/nvkm: add a replacement for nvkm_notify

This replaces the twisty, confusing relationship between nvkm_event and
nvkm_notify with something much simpler, and less racy.  It also places
events in the object tree hierarchy, which will allow a heap of the code
tracking events across allocation/teardown/suspend to be removed.

This commit just adds the new interfaces, and passes the owning subdev to
the event constructor to enable debug-tracing in the new code.

v2:
- use ?: (lyude)
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Reviewed-by: Lyude Paul <lyude@redhat.com>
parent 361863ce
......@@ -32,6 +32,8 @@
#define NVIF_CLASS_VMM_GM200 /* ifb00d.h */ 0x8000b00d
#define NVIF_CLASS_VMM_GP100 /* ifc00d.h */ 0x8000c00d
#define NVIF_CLASS_EVENT /* if000e.h */ 0x8000000e
#define NVIF_CLASS_DISP /* if0010.h */ 0x80000010
#define NVIF_CLASS_CONN /* if0011.h */ 0x80000011
#define NVIF_CLASS_OUTP /* if0012.h */ 0x80000012
......
/* SPDX-License-Identifier: MIT */
#ifndef __NVIF_EVENT_H__
#define __NVIF_EVENT_H__
#include <nvif/object.h>
#include <nvif/if000e.h>
struct nvif_event;
#define NVIF_EVENT_KEEP 0
#define NVIF_EVENT_DROP 1
typedef int (*nvif_event_func)(struct nvif_event *, void *repv, u32 repc);
/* Client-side handle for an NVIF_CLASS_EVENT object. */
struct nvif_event {
	struct nvif_object object;	/* backing NVIF object; token identifies us in callbacks */
	nvif_event_func func;		/* per-notification callback; returns NVIF_EVENT_KEEP/DROP */
};
/* Return true if @event's backing object has been successfully constructed. */
static inline bool
nvif_event_constructed(struct nvif_event *event)
{
	return nvif_object_constructed(&event->object);
}
int nvif_event_ctor_(struct nvif_object *, const char *, u32, nvif_event_func, bool,
struct nvif_event_v0 *, u32, bool, struct nvif_event *);
/* Construct an event object, logging an error on failure.
 * Thin wrapper over nvif_event_ctor_() with warn=true.
 */
static inline int
nvif_event_ctor(struct nvif_object *parent, const char *name, u32 handle, nvif_event_func func,
		bool wait, struct nvif_event_v0 *args, u32 argc, struct nvif_event *event)
{
	return nvif_event_ctor_(parent, name, handle, func, wait, args, argc, true, event);
}
void nvif_event_dtor(struct nvif_event *);
int nvif_event_allow(struct nvif_event *);
int nvif_event_block(struct nvif_event *);
struct nvif_notify_req_v0 {
__u8 version;
......
/* SPDX-License-Identifier: MIT */
#ifndef __NVIF_IF000E_H__
#define __NVIF_IF000E_H__
union nvif_event_args {
struct nvif_event_v0 {
__u8 version;
__u8 wait;
__u8 pad02[6];
__u8 data[];
} v0;
};
#define NVIF_EVENT_V0_ALLOW 0x00
#define NVIF_EVENT_V0_BLOCK 0x01
union nvif_event_allow_args {
struct nvif_event_allow_vn {
} vn;
};
union nvif_event_block_args {
struct nvif_event_block_vn {
} vn;
};
#endif
......@@ -15,6 +15,7 @@ struct nvkm_client {
void *data;
int (*ntfy)(const void *, u32, const void *, u32);
int (*event)(u64 token, void *argv, u32 argc);
struct list_head umem;
spinlock_t lock;
......@@ -23,6 +24,7 @@ struct nvkm_client {
int nvkm_client_new(const char *name, u64 device, const char *cfg,
const char *dbg,
int (*)(const void *, u32, const void *, u32),
int (*)(u64, void *, u32),
struct nvkm_client **);
struct nvkm_client *nvkm_client_search(struct nvkm_client *, u64 handle);
......
......@@ -4,9 +4,12 @@
#include <core/os.h>
struct nvkm_notify;
struct nvkm_object;
struct nvkm_oclass;
struct nvkm_uevent;
struct nvkm_event {
const struct nvkm_event_func *func;
struct nvkm_subdev *subdev;
int types_nr;
int index_nr;
......@@ -15,6 +18,8 @@ struct nvkm_event {
spinlock_t list_lock;
struct list_head list;
int *refs;
struct list_head ntfy;
};
struct nvkm_event_func {
......@@ -25,11 +30,42 @@ struct nvkm_event_func {
void (*fini)(struct nvkm_event *, int type, int index);
};
int nvkm_event_init(const struct nvkm_event_func *func, int types_nr,
int nvkm_event_init(const struct nvkm_event_func *func, struct nvkm_subdev *, int types_nr,
int index_nr, struct nvkm_event *);
void nvkm_event_fini(struct nvkm_event *);
void nvkm_event_get(struct nvkm_event *, u32 types, int index);
void nvkm_event_put(struct nvkm_event *, u32 types, int index);
void nvkm_event_send(struct nvkm_event *, u32 types, int index,
void *data, u32 size);
#define NVKM_EVENT_KEEP 0
#define NVKM_EVENT_DROP 1
struct nvkm_event_ntfy;
typedef int (*nvkm_event_func)(struct nvkm_event_ntfy *, u32 bits);
/* A single registered listener on an nvkm_event. */
struct nvkm_event_ntfy {
	struct nvkm_event *event;	/* bound event; NULL before add / after del */
	int id;				/* index within the event this listens on */
	u32 bits;			/* type bits this listener cares about */
	bool wait;			/* one-shot style: only on event->ntfy list while allowed */
	nvkm_event_func func;		/* callback; returns NVKM_EVENT_KEEP or NVKM_EVENT_DROP */
	atomic_t allowed;		/* 1 while delivery is enabled */
	bool running;			/* refs currently taken on the event (refs_lock) */
	struct list_head head;		/* entry on event->ntfy (list_lock) */
};
void nvkm_event_ntfy(struct nvkm_event *, int id, u32 bits);
bool nvkm_event_ntfy_valid(struct nvkm_event *, int id, u32 bits);
void nvkm_event_ntfy_add(struct nvkm_event *, int id, u32 bits, bool wait, nvkm_event_func,
struct nvkm_event_ntfy *);
void nvkm_event_ntfy_del(struct nvkm_event_ntfy *);
void nvkm_event_ntfy_allow(struct nvkm_event_ntfy *);
void nvkm_event_ntfy_block(struct nvkm_event_ntfy *);
typedef int (*nvkm_uevent_func)(struct nvkm_object *, u64 token, u32 bits);
int nvkm_uevent_new(const struct nvkm_oclass *, void *argv, u32 argc, struct nvkm_object **);
int nvkm_uevent_add(struct nvkm_uevent *, struct nvkm_event *, int id, u32 bits, nvkm_uevent_func);
#endif
......@@ -4,6 +4,7 @@
#include <core/oclass.h>
struct nvkm_event;
struct nvkm_gpuobj;
struct nvkm_uevent;
struct nvkm_object {
const struct nvkm_object_func *func;
......@@ -43,6 +44,7 @@ struct nvkm_object_func {
int (*bind)(struct nvkm_object *, struct nvkm_gpuobj *, int align,
struct nvkm_gpuobj **);
int (*sclass)(struct nvkm_object *, int index, struct nvkm_oclass *);
int (*uevent)(struct nvkm_object *, void *argv, u32 argc, struct nvkm_uevent *);
};
void nvkm_object_ctor(const struct nvkm_object_func *,
......
......@@ -71,11 +71,24 @@ nvkm_client_suspend(void *priv)
return nvkm_object_fini(&client->object, true);
}
/* Bridge an NVKM-side event notification back to NVIF.
 *
 * The 64-bit token is the nvif_object pointer handed to NVKM at event
 * creation; recover the wrapping nvif_event, invoke the client callback,
 * and translate its NVIF keep/drop reply into the NVKM equivalent.
 */
static int
nvkm_client_event(u64 token, void *repv, u32 repc)
{
	struct nvif_object *obj = (void *)(unsigned long)token;
	struct nvif_event *ev = container_of(obj, typeof(*ev), object);

	return (ev->func(ev, repv, repc) == NVIF_EVENT_KEEP) ? NVKM_EVENT_KEEP : NVKM_EVENT_DROP;
}
static int
nvkm_client_driver_init(const char *name, u64 device, const char *cfg,
const char *dbg, void **ppriv)
{
return nvkm_client_new(name, device, cfg, dbg, nvif_notify, (struct nvkm_client **)ppriv);
return nvkm_client_new(name, device, cfg, dbg, nvif_notify, nvkm_client_event,
(struct nvkm_client **)ppriv);
}
const struct nvif_driver
......
......@@ -5,6 +5,7 @@ nvif-y += nvif/conn.o
nvif-y += nvif/device.o
nvif-y += nvif/disp.o
nvif-y += nvif/driver.o
nvif-y += nvif/event.o
nvif-y += nvif/fifo.o
nvif-y += nvif/head.o
nvif-y += nvif/mem.o
......
/*
* Copyright 2021 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <nvif/event.h>
#include <nvif/printf.h>
#include <nvif/class.h>
#include <nvif/if000e.h>
/* Ask the host to stop delivering notifications for @event.
 * Returns 0 (no-op) if the event was never constructed, otherwise the
 * method-call result; failures are logged via NVIF_ERRON.
 */
int
nvif_event_block(struct nvif_event *event)
{
	int ret;

	if (!nvif_event_constructed(event))
		return 0;

	ret = nvif_mthd(&event->object, NVIF_EVENT_V0_BLOCK, NULL, 0);
	NVIF_ERRON(ret, &event->object, "[BLOCK]");
	return ret;
}
/* Ask the host to begin delivering notifications for @event.
 * Returns 0 (no-op) if the event was never constructed, otherwise the
 * method-call result; failures are logged via NVIF_ERRON.
 */
int
nvif_event_allow(struct nvif_event *event)
{
	int ret;

	if (!nvif_event_constructed(event))
		return 0;

	ret = nvif_mthd(&event->object, NVIF_EVENT_V0_ALLOW, NULL, 0);
	NVIF_ERRON(ret, &event->object, "[ALLOW]");
	return ret;
}
/* Destroy @event by destroying its backing object.  Safe on an event
 * that was never constructed (nvif_object_dtor handles that case).
 */
void
nvif_event_dtor(struct nvif_event *event)
{
	nvif_object_dtor(&event->object);
}
/* Construct an NVIF_CLASS_EVENT object under @parent.
 *
 * @args/@argc may carry class-specific payload appended after the common
 * nvif_event_v0 header; when @args is NULL a local header-only args
 * struct is used.  @wait is recorded into the args for the host side.
 * @warn suppresses the error log when false (used by "try" callers).
 * Returns 0 on success, negative errno otherwise; @event->func is only
 * set on success.
 */
int
nvif_event_ctor_(struct nvif_object *parent, const char *name, u32 handle, nvif_event_func func,
		 bool wait, struct nvif_event_v0 *args, u32 argc, bool warn,
		 struct nvif_event *event)
{
	struct nvif_event_v0 _args;
	int ret;

	if (!args) {
		args = &_args;
		argc = sizeof(_args);
	}

	args->version = 0;
	args->wait = wait;

	ret = nvif_object_ctor(parent, name ?: "nvifEvent", handle,
			       NVIF_CLASS_EVENT, args, argc, &event->object);
	/* size logged is the class-specific payload, excluding the header */
	NVIF_ERRON(ret && warn, parent, "[NEW EVENT wait:%d size:%zd]",
		   args->wait, argc - sizeof(*args));
	if (ret)
		return ret;

	event->func = func;
	return 0;
}
......@@ -14,3 +14,4 @@ nvkm-y += nvkm/core/oproxy.o
nvkm-y += nvkm/core/option.o
nvkm-y += nvkm/core/ramht.o
nvkm-y += nvkm/core/subdev.o
nvkm-y += nvkm/core/uevent.o
......@@ -44,7 +44,7 @@ nvkm_uclient_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))){
args->v0.name[sizeof(args->v0.name) - 1] = 0;
ret = nvkm_client_new(args->v0.name, args->v0.device, NULL,
NULL, oclass->client->ntfy, &client);
NULL, oclass->client->ntfy, oclass->client->event, &client);
if (ret)
return ret;
} else
......@@ -286,7 +286,7 @@ int
nvkm_client_new(const char *name, u64 device, const char *cfg,
const char *dbg,
int (*ntfy)(const void *, u32, const void *, u32),
struct nvkm_client **pclient)
int (*event)(u64, void *, u32), struct nvkm_client **pclient)
{
struct nvkm_oclass oclass = { .base = nvkm_uclient_sclass };
struct nvkm_client *client;
......@@ -301,6 +301,7 @@ nvkm_client_new(const char *name, u64 device, const char *cfg,
client->debug = nvkm_dbgopt(dbg, "CLIENT");
client->objroot = RB_ROOT;
client->ntfy = ntfy;
client->event = event;
INIT_LIST_HEAD(&client->umem);
spin_lock_init(&client->lock);
return 0;
......
......@@ -21,14 +21,19 @@
*/
#include <core/event.h>
#include <core/notify.h>
#include <core/subdev.h>
void
nvkm_event_put(struct nvkm_event *event, u32 types, int index)
{
assert_spin_locked(&event->refs_lock);
nvkm_trace(event->subdev, "event: decr %08x on %d\n", types, index);
while (types) {
int type = __ffs(types); types &= ~(1 << type);
if (--event->refs[index * event->types_nr + type] == 0) {
nvkm_trace(event->subdev, "event: blocking %d on %d\n", type, index);
if (event->func->fini)
event->func->fini(event, 1 << type, index);
}
......@@ -39,18 +44,146 @@ void
/* Enable delivery of @types on @index.
 *
 * Increments the per-(index,type) refcount for each set type bit and
 * fires the implementation's init() hook on each 0->1 transition.
 * Caller must hold event->refs_lock (asserted below).
 */
nvkm_event_get(struct nvkm_event *event, u32 types, int index)
{
	assert_spin_locked(&event->refs_lock);

	nvkm_trace(event->subdev, "event: incr %08x on %d\n", types, index);

	while (types) {
		/* peel off the lowest set type bit each iteration */
		int type = __ffs(types); types &= ~(1 << type);
		if (++event->refs[index * event->types_nr + type] == 1) {
			nvkm_trace(event->subdev, "event: allowing %d on %d\n", type, index);
			if (event->func->init)
				event->func->init(event, 1 << type, index);
		}
	}
}
/* Reconcile @ntfy's reference state with its "allowed" flag.
 *
 * Under refs_lock: if allowed and not yet running, take event refs
 * (nvkm_event_get); if blocked but still running, drop them
 * (nvkm_event_put).  This is the only place ntfy->running changes,
 * so get/put stay balanced across repeated allow/block calls.
 */
static void
nvkm_event_ntfy_state(struct nvkm_event_ntfy *ntfy)
{
	struct nvkm_event *event = ntfy->event;
	unsigned long flags;

	nvkm_trace(event->subdev, "event: ntfy state changed\n");
	spin_lock_irqsave(&event->refs_lock, flags);

	if (atomic_read(&ntfy->allowed) != ntfy->running) {
		if (ntfy->running) {
			nvkm_event_put(ntfy->event, ntfy->bits, ntfy->id);
			ntfy->running = false;
		} else {
			nvkm_event_get(ntfy->event, ntfy->bits, ntfy->id);
			ntfy->running = true;
		}
	}

	spin_unlock_irqrestore(&event->refs_lock, flags);
}
/* Unlink @ntfy from its event's listener list, under list_lock.
 * list_del_init() makes repeated removal safe.
 */
static void
nvkm_event_ntfy_remove(struct nvkm_event_ntfy *ntfy)
{
	spin_lock_irq(&ntfy->event->list_lock);
	list_del_init(&ntfy->head);
	spin_unlock_irq(&ntfy->event->list_lock);
}
/* Link @ntfy onto its event's listener list, under list_lock. */
static void
nvkm_event_ntfy_insert(struct nvkm_event_ntfy *ntfy)
{
	spin_lock_irq(&ntfy->event->list_lock);
	list_add_tail(&ntfy->head, &ntfy->event->ntfy);
	spin_unlock_irq(&ntfy->event->list_lock);
}
/* Block delivery for @ntfy.
 *
 * atomic_xchg() ensures only the transition from allowed does any work:
 * drop the event refs via nvkm_event_ntfy_state(), and for waiting
 * (one-shot style) notifies also take it off the listener list.
 * @wait is passed explicitly so _del() can force a remove-less block.
 */
static void
nvkm_event_ntfy_block_(struct nvkm_event_ntfy *ntfy, bool wait)
{
	struct nvkm_subdev *subdev = ntfy->event->subdev;

	nvkm_trace(subdev, "event: ntfy block %08x on %d wait:%d\n", ntfy->bits, ntfy->id, wait);

	if (atomic_xchg(&ntfy->allowed, 0) == 1) {
		nvkm_event_ntfy_state(ntfy);
		if (wait)
			nvkm_event_ntfy_remove(ntfy);
	}
}
/* Public entry point: block delivery for @ntfy.
 * No-op when the notify is not bound to an event (before add / after del).
 */
void
nvkm_event_ntfy_block(struct nvkm_event_ntfy *ntfy)
{
	if (!ntfy->event)
		return;

	nvkm_event_ntfy_block_(ntfy, ntfy->wait);
}
/* Public entry point: allow delivery for @ntfy.
 *
 * Only the blocked->allowed transition does work (atomic_xchg): take the
 * event refs via nvkm_event_ntfy_state(), and for waiting (one-shot
 * style) notifies also insert onto the listener list.
 */
void
nvkm_event_ntfy_allow(struct nvkm_event_ntfy *ntfy)
{
	/* Guard against an unbound notify (ntfy->event is NULL before
	 * nvkm_event_ntfy_add() / after _del()), matching the check in
	 * nvkm_event_ntfy_block(); previously this dereferenced
	 * ntfy->event->subdev unconditionally.
	 */
	if (!ntfy->event)
		return;

	nvkm_trace(ntfy->event->subdev, "event: ntfy allow %08x on %d\n", ntfy->bits, ntfy->id);

	if (atomic_xchg(&ntfy->allowed, 1) == 0) {
		nvkm_event_ntfy_state(ntfy);
		if (ntfy->wait)
			nvkm_event_ntfy_insert(ntfy);
	}
}
/* Unbind @ntfy from its event entirely.
 *
 * Blocks with wait=false (so removal is unconditional below), unlinks
 * from the listener list, and clears ntfy->event so the notify can be
 * safely deleted again or re-added.  No-op if never bound.
 */
void
nvkm_event_ntfy_del(struct nvkm_event_ntfy *ntfy)
{
	struct nvkm_event *event = ntfy->event;

	if (!event)
		return;

	nvkm_trace(event->subdev, "event: ntfy del %08x on %d\n", ntfy->bits, ntfy->id);
	nvkm_event_ntfy_block_(ntfy, false);
	nvkm_event_ntfy_remove(ntfy);
	ntfy->event = NULL;
}
/* Bind @ntfy to receive @bits notifications on @id from @event.
 *
 * The notify starts blocked.  Non-waiting notifies go onto the listener
 * list immediately (delivery is still gated by "allowed"); waiting
 * (one-shot style) notifies are only inserted by nvkm_event_ntfy_allow().
 */
void
nvkm_event_ntfy_add(struct nvkm_event *event, int id, u32 bits, bool wait, nvkm_event_func func,
		    struct nvkm_event_ntfy *ntfy)
{
	/* Trace bits with %08x and id with %d, matching every sibling
	 * trace in this file; the arguments were previously swapped.
	 */
	nvkm_trace(event->subdev, "event: ntfy add %08x on %d wait:%d\n", bits, id, wait);
	ntfy->event = event;
	ntfy->id = id;
	ntfy->bits = bits;
	ntfy->wait = wait;
	ntfy->func = func;
	atomic_set(&ntfy->allowed, 0);
	ntfy->running = false;
	INIT_LIST_HEAD(&ntfy->head);
	if (!ntfy->wait)
		nvkm_event_ntfy_insert(ntfy);
}
/* Validate an (id, bits) pair against @event.
 * Currently a stub that accepts everything; kept so callers can validate
 * user-supplied parameters through a single choke point.
 */
bool
nvkm_event_ntfy_valid(struct nvkm_event *event, int id, u32 bits)
{
	return true;
}
/* Dispatch @bits on @id to every matching, allowed listener.
 *
 * Caller must hold event->list_lock (nvkm_event_send() takes it before
 * calling here).  _safe iteration is used, presumably so a callback may
 * remove its own notify — confirm against callers.
 */
void
nvkm_event_ntfy(struct nvkm_event *event, int id, u32 bits)
{
	struct nvkm_event_ntfy *ntfy, *ntmp;

	nvkm_trace(event->subdev, "event: ntfy %08x on %d\n", bits, id);

	list_for_each_entry_safe(ntfy, ntmp, &event->ntfy, head) {
		if (ntfy->id == id && ntfy->bits & bits) {
			if (atomic_read(&ntfy->allowed))
				/* callback sees only the bits it asked for */
				ntfy->func(ntfy, ntfy->bits & bits);
		}
	}
}
void
nvkm_event_send(struct nvkm_event *event, u32 types, int index,
void *data, u32 size)
nvkm_event_send(struct nvkm_event *event, u32 types, int index, void *data, u32 size)
{
struct nvkm_notify *notify;
unsigned long flags;
......@@ -59,6 +192,8 @@ nvkm_event_send(struct nvkm_event *event, u32 types, int index,
return;
spin_lock_irqsave(&event->list_lock, flags);
nvkm_event_ntfy(event, index, types);
list_for_each_entry(notify, &event->list, head) {
if (notify->index == index && (notify->types & types)) {
if (event->func->send) {
......@@ -81,20 +216,20 @@ nvkm_event_fini(struct nvkm_event *event)
}
int
nvkm_event_init(const struct nvkm_event_func *func, int types_nr, int index_nr,
struct nvkm_event *event)
nvkm_event_init(const struct nvkm_event_func *func, struct nvkm_subdev *subdev,
int types_nr, int index_nr, struct nvkm_event *event)
{
event->refs = kzalloc(array3_size(index_nr, types_nr,
sizeof(*event->refs)),
GFP_KERNEL);
event->refs = kzalloc(array3_size(index_nr, types_nr, sizeof(*event->refs)), GFP_KERNEL);
if (!event->refs)
return -ENOMEM;
event->func = func;
event->subdev = subdev;
event->types_nr = types_nr;
event->index_nr = index_nr;
spin_lock_init(&event->refs_lock);
spin_lock_init(&event->list_lock);
INIT_LIST_HEAD(&event->list);
INIT_LIST_HEAD(&event->ntfy);
return 0;
}
......@@ -47,6 +47,26 @@ nvkm_ioctl_nop(struct nvkm_client *client,
return ret;
}
#include <nvif/class.h>
static int
nvkm_ioctl_sclass_(struct nvkm_object *object, int index, struct nvkm_oclass *oclass)
{
if ( object->func->uevent &&
!object->func->uevent(object, NULL, 0, NULL) && index-- == 0) {
oclass->ctor = nvkm_uevent_new;
oclass->base.minver = 0;
oclass->base.maxver = 0;
oclass->base.oclass = NVIF_CLASS_EVENT;
return 0;
}
if (object->func->sclass)
return object->func->sclass(object, index, oclass);
return -ENOSYS;
}
static int
nvkm_ioctl_sclass(struct nvkm_client *client,
struct nvkm_object *object, void *data, u32 size)
......@@ -64,8 +84,7 @@ nvkm_ioctl_sclass(struct nvkm_client *client,
if (size != args->v0.count * sizeof(args->v0.oclass[0]))
return -EINVAL;
while (object->func->sclass &&
object->func->sclass(object, i, &oclass) >= 0) {
while (nvkm_ioctl_sclass_(object, i, &oclass) >= 0) {
if (i < args->v0.count) {
args->v0.oclass[i].oclass = oclass.base.oclass;
args->v0.oclass[i].minver = oclass.base.minver;
......@@ -100,7 +119,7 @@ nvkm_ioctl_new(struct nvkm_client *client,
} else
return ret;
if (!parent->func->sclass) {
if (!parent->func->sclass && !parent->func->uevent) {
nvif_ioctl(parent, "cannot have children\n");
return -EINVAL;
}
......@@ -113,7 +132,7 @@ nvkm_ioctl_new(struct nvkm_client *client,
oclass.object = args->v0.object;
oclass.client = client;
oclass.parent = parent;
ret = parent->func->sclass(parent, i++, &oclass);
ret = nvkm_ioctl_sclass_(parent, i++, &oclass);
if (ret)
return ret;
} while (oclass.base.oclass != args->v0.oclass);
......
......@@ -105,6 +105,18 @@ nvkm_oproxy_sclass(struct nvkm_object *object, int index,
return oproxy->object->func->sclass(oproxy->object, index, oclass);
}
/* Forward a uevent request through the proxy to the wrapped object,
 * or report -ENOSYS when the target doesn't implement uevents.
 */
static int
nvkm_oproxy_uevent(struct nvkm_object *object, void *argv, u32 argc,
		   struct nvkm_uevent *uevent)
{
	struct nvkm_object *inner = nvkm_oproxy(object)->object;

	return inner->func->uevent ? inner->func->uevent(inner, argv, argc, uevent) : -ENOSYS;
}
static int
nvkm_oproxy_fini(struct nvkm_object *object, bool suspend)
{
......@@ -188,6 +200,7 @@ nvkm_oproxy_func = {
.wr32 = nvkm_oproxy_wr32,
.bind = nvkm_oproxy_bind,
.sclass = nvkm_oproxy_sclass,
.uevent = nvkm_oproxy_uevent,
};
void
......
/*
* Copyright 2021 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#define nvkm_uevent(p) container_of((p), struct nvkm_uevent, object)
#include <core/event.h>
#include <core/client.h>
#include <nvif/if000e.h>
/* Kernel-side object backing an NVIF_CLASS_EVENT handle. */
struct nvkm_uevent {
	struct nvkm_object object;	/* base object (token used for client callback) */
	struct nvkm_object *parent;	/* object the event was created on */
	nvkm_uevent_func func;		/* optional parent-supplied delivery hook */
	bool wait;			/* copied from nvif_event_v0.wait at creation */
	struct nvkm_event_ntfy ntfy;	/* our registration with the nvkm_event */
	atomic_t allowed;		/* user-visible allow state, re-applied on init() */
};
/* NVIF_EVENT_V0_BLOCK method: stop delivery and remember the state so
 * nvkm_uevent_init() won't re-allow after suspend/resume.
 * Rejects any non-empty argument payload.
 */
static int
nvkm_uevent_mthd_block(struct nvkm_uevent *uevent, union nvif_event_block_args *args, u32 argc)
{
	if (argc != sizeof(args->vn))
		return -ENOSYS;

	nvkm_event_ntfy_block(&uevent->ntfy);
	atomic_set(&uevent->allowed, 0);
	return 0;
}
/* NVIF_EVENT_V0_ALLOW method: enable delivery and remember the state so
 * nvkm_uevent_init() re-allows after suspend/resume.
 * Rejects any non-empty argument payload.
 */
static int
nvkm_uevent_mthd_allow(struct nvkm_uevent *uevent, union nvif_event_allow_args *args, u32 argc)
{
	if (argc != sizeof(args->vn))
		return -ENOSYS;

	nvkm_event_ntfy_allow(&uevent->ntfy);
	atomic_set(&uevent->allowed, 1);
	return 0;
}
/* Method dispatch for NVIF_CLASS_EVENT objects: ALLOW and BLOCK only. */
static int
nvkm_uevent_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc)
{
	struct nvkm_uevent *uevent = nvkm_uevent(object);

	if (mthd == NVIF_EVENT_V0_ALLOW)
		return nvkm_uevent_mthd_allow(uevent, argv, argc);
	if (mthd == NVIF_EVENT_V0_BLOCK)
		return nvkm_uevent_mthd_block(uevent, argv, argc);

	return -EINVAL;
}
/* Object fini (suspend/teardown): block delivery, but leave
 * uevent->allowed untouched so nvkm_uevent_init() can restore it.
 * @suspend is unused; blocking is safe in both cases.
 */
static int
nvkm_uevent_fini(struct nvkm_object *object, bool suspend)
{
	struct nvkm_uevent *uevent = nvkm_uevent(object);

	nvkm_event_ntfy_block(&uevent->ntfy);
	return 0;
}
/* Object init (resume): re-enable delivery if the user had the event
 * allowed before fini() blocked it.
 */
static int
nvkm_uevent_init(struct nvkm_object *object)
{
	struct nvkm_uevent *uevent = nvkm_uevent(object);

	if (atomic_read(&uevent->allowed))
		nvkm_event_ntfy_allow(&uevent->ntfy);

	return 0;
}
/* Object destructor: unbind from the event, then return the allocation
 * for the core to kfree().
 */
static void *
nvkm_uevent_dtor(struct nvkm_object *object)
{
	struct nvkm_uevent *uevent = nvkm_uevent(object);

	nvkm_event_ntfy_del(&uevent->ntfy);
	return uevent;
}
/* Object vtable for NVIF_CLASS_EVENT objects. */
static const struct nvkm_object_func
nvkm_uevent = {
	.dtor = nvkm_uevent_dtor,
	.init = nvkm_uevent_init,
	.fini = nvkm_uevent_fini,
	.mthd = nvkm_uevent_mthd,
};
/* nvkm_event_ntfy callback: forward the notification either through the
 * parent-supplied hook (set by nvkm_uevent_add()) or straight to the
 * client's event callback, identified by the object token.
 */
static int
nvkm_uevent_ntfy(struct nvkm_event_ntfy *ntfy, u32 bits)
{
	struct nvkm_uevent *uevent = container_of(ntfy, typeof(*uevent), ntfy);
	struct nvkm_client *client = uevent->object.client;

	return uevent->func ? uevent->func(uevent->parent, uevent->object.token, bits)
			    : client->event(uevent->object.token, NULL, 0);
}
/* Called by the parent's uevent() hook to bind @uevent to @event for
 * (@id, @bits), optionally with a custom delivery hook @func (NULL means
 * notify the client directly).  -EBUSY if already bound.
 */
int
nvkm_uevent_add(struct nvkm_uevent *uevent, struct nvkm_event *event, int id, u32 bits,
		nvkm_uevent_func func)
{
	if (WARN_ON(uevent->func))
		return -EBUSY;

	nvkm_event_ntfy_add(event, id, bits, uevent->wait, nvkm_uevent_ntfy, &uevent->ntfy);
	uevent->func = func;
	return 0;
}
/* NVIF_CLASS_EVENT object constructor (oclass->ctor).
 *
 * Validates the v0 args header, allocates the object, then hands the
 * class-specific payload (args->v0.data) to the parent's uevent() hook,
 * which is expected to call nvkm_uevent_add() to complete the binding.
 * The parent must implement uevent(); enumeration in the ioctl layer
 * only exposes this class when it does.
 */
int
nvkm_uevent_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
		struct nvkm_object **pobject)
{
	struct nvkm_object *parent = oclass->parent;
	struct nvkm_uevent *uevent;
	union nvif_event_args *args = argv;

	if (argc < sizeof(args->v0) || args->v0.version != 0)
		return -ENOSYS;

	if (!(uevent = kzalloc(sizeof(*uevent), GFP_KERNEL)))
		return -ENOMEM;
	/* published before ctor so the core can clean up on failure */
	*pobject = &uevent->object;

	nvkm_object_ctor(&nvkm_uevent, oclass, &uevent->object);
	uevent->parent = parent;
	uevent->func = NULL;
	uevent->wait = args->v0.wait;
	uevent->ntfy.event = NULL;
	return parent->func->uevent(parent, &args->v0.data, argc - sizeof(args->v0), uevent);
}
......@@ -343,9 +343,7 @@ nvkm_disp_oneinit(struct nvkm_engine *engine)
/* Apparently we need to create a new one! */
ret = nvkm_conn_new(disp, i, &connE, &outp->conn);
if (ret) {
nvkm_error(&disp->engine.subdev,
"failed to create outp %d conn: %d\n",
outp->index, ret);
nvkm_error(subdev, "failed to create outp %d conn: %d\n", outp->index, ret);
nvkm_conn_del(&outp->conn);
list_del(&outp->head);
nvkm_outp_del(&outp);
......@@ -355,7 +353,7 @@ nvkm_disp_oneinit(struct nvkm_engine *engine)
list_add_tail(&outp->conn->head, &disp->conns);
}
ret = nvkm_event_init(&nvkm_disp_hpd_func, 3, hpd, &disp->hpd);
ret = nvkm_event_init(&nvkm_disp_hpd_func, subdev, 3, hpd, &disp->hpd);
if (ret)
return ret;
......@@ -382,7 +380,7 @@ nvkm_disp_oneinit(struct nvkm_engine *engine)
list_for_each_entry(head, &disp->heads, head)
i = max(i, head->id + 1);
return nvkm_event_init(&nvkm_disp_vblank_func, 1, i, &disp->vblank);
return nvkm_event_init(&nvkm_disp_vblank_func, subdev, 1, i, &disp->vblank);
}
static void *
......@@ -473,5 +471,6 @@ nvkm_disp_new_(const struct nvkm_disp_func *func, struct nvkm_device *device,
mutex_init(&disp->super.mutex);
}
return nvkm_event_init(func->uevent, 1, ARRAY_SIZE(disp->chan), &disp->uevent);
return nvkm_event_init(func->uevent, &disp->engine.subdev, 1, ARRAY_SIZE(disp->chan),
&disp->uevent);
}
......@@ -347,11 +347,11 @@ nvkm_fifo_ctor(const struct nvkm_fifo_func *func, struct nvkm_device *device,
return ret;
if (func->uevent_init) {
ret = nvkm_event_init(&nvkm_fifo_uevent_func, 1, 1,
ret = nvkm_event_init(&nvkm_fifo_uevent_func, &fifo->engine.subdev, 1, 1,
&fifo->uevent);
if (ret)
return ret;
}
return nvkm_event_init(&nvkm_fifo_kevent_func, 1, nr, &fifo->kevent);
return nvkm_event_init(&nvkm_fifo_kevent_func, &fifo->engine.subdev, 1, nr, &fifo->kevent);
}
......@@ -107,5 +107,5 @@ nvkm_sw_chan_ctor(const struct nvkm_sw_chan_func *func, struct nvkm_sw *sw,
list_add(&chan->head, &sw->chan);
spin_unlock_irqrestore(&sw->engine.lock, flags);
return nvkm_event_init(&nvkm_sw_chan_event, 1, 1, &chan->event);
return nvkm_event_init(&nvkm_sw_chan_event, &sw->engine.subdev, 1, 1, &chan->event);
}
......@@ -130,8 +130,7 @@ nvkm_fault_oneinit(struct nvkm_subdev *subdev)
}
}
ret = nvkm_event_init(&nvkm_fault_ntfy, 1, fault->buffer_nr,
&fault->event);
ret = nvkm_event_init(&nvkm_fault_ntfy, subdev, 1, fault->buffer_nr, &fault->event);
if (ret)
return ret;
......
......@@ -251,6 +251,5 @@ nvkm_gpio_new_(const struct nvkm_gpio_func *func, struct nvkm_device *device,
nvkm_subdev_ctor(&nvkm_gpio, device, type, inst, &gpio->subdev);
gpio->func = func;
return nvkm_event_init(&nvkm_gpio_intr_func, 2, func->lines,
&gpio->event);
return nvkm_event_init(&nvkm_gpio_intr_func, &gpio->subdev, 2, func->lines, &gpio->event);
}
......@@ -427,5 +427,5 @@ nvkm_i2c_new_(const struct nvkm_i2c_func *func, struct nvkm_device *device,
}
}
return nvkm_event_init(&nvkm_i2c_intr_func, 4, i, &i2c->event);
return nvkm_event_init(&nvkm_i2c_intr_func, &i2c->subdev, 4, i, &i2c->event);
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment