Commit 8478cd5a authored by Ben Skeggs

drm/nouveau/nvkm: add locking to subdev/engine init paths

This wasn't really needed before; the main place this could race is with
channel recovery, but that race (through potentially fragile means) shouldn't
have been possible.

However, a number of upcoming patches benefit from having better control
over subdev init, necessitating some improvements here.

- allows subdev/engine oneinit() without init() (host/fifo patches)
- merges engine use locking/tracking into subdev, and extends it to fix
  some issues that will arise with future usage patterns (acr patches)
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Reviewed-by: Lyude Paul <lyude@redhat.com>
parent 565bfaf1
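
To make the reworked usage pattern concrete, here is a minimal caller sketch; it is not part of the commit. The function example_grab_engine() is hypothetical and only added for illustration, while nvkm_engine_ref()/nvkm_engine_unref() and the subdev use.refcount/use.mutex state are the pieces actually reworked in the diff below.

/*
 * Minimal sketch (not from the commit) of a caller inside nvkm using the
 * refcounted init path added here.  nvkm_engine_ref()/nvkm_engine_unref()
 * now defer to nvkm_subdev_ref()/nvkm_subdev_unref(), which serialise on
 * subdev->use.mutex.
 */
#include <core/engine.h>

static int
example_grab_engine(struct nvkm_engine *engine)	/* hypothetical caller */
{
	struct nvkm_engine *ref;

	/* First user takes use.mutex, runs oneinit()/init(), and sets
	 * use.refcount to 1; later users just bump the refcount.
	 */
	ref = nvkm_engine_ref(engine);
	if (IS_ERR(ref))
		return PTR_ERR(ref);

	/* ... use the engine ... */

	/* Last user ends up running nvkm_subdev_fini(&engine->subdev, false). */
	nvkm_engine_unref(&ref);
	return 0;
}

The double-checked refcount_inc_not_zero() under use.mutex in nvkm_subdev_ref() is what lets concurrent first users race safely: only one of them performs init, the others block on the mutex and then take a plain reference.
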
@@ -12,12 +12,6 @@ struct nvkm_engine {
const struct nvkm_engine_func *func;
struct nvkm_subdev subdev;
spinlock_t lock;
struct {
refcount_t refcount;
struct mutex mutex;
bool enabled;
} use;
};
struct nvkm_engine_func {
@@ -17,12 +17,19 @@ struct nvkm_subdev {
struct nvkm_device *device;
enum nvkm_subdev_type type;
int inst;
char name[16];
u32 debug;
struct list_head head;
struct {
refcount_t refcount;
struct mutex mutex;
bool enabled;
} use;
struct nvkm_inth inth;
struct list_head head;
void **pself;
bool oneinit;
};
@@ -40,11 +47,23 @@ struct nvkm_subdev_func {
extern const char *nvkm_subdev_type[NVKM_SUBDEV_NR];
int nvkm_subdev_new_(const struct nvkm_subdev_func *, struct nvkm_device *, enum nvkm_subdev_type,
int inst, struct nvkm_subdev **);
void nvkm_subdev_ctor(const struct nvkm_subdev_func *, struct nvkm_device *,
enum nvkm_subdev_type, int inst, struct nvkm_subdev *);
void __nvkm_subdev_ctor(const struct nvkm_subdev_func *, struct nvkm_device *,
enum nvkm_subdev_type, int inst, struct nvkm_subdev *);
static inline void
nvkm_subdev_ctor(const struct nvkm_subdev_func *func, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_subdev *subdev)
{
__nvkm_subdev_ctor(func, device, type, inst, subdev);
mutex_init(&subdev->use.mutex);
}
void nvkm_subdev_disable(struct nvkm_device *, enum nvkm_subdev_type, int inst);
void nvkm_subdev_del(struct nvkm_subdev **);
int nvkm_subdev_ref(struct nvkm_subdev *);
void nvkm_subdev_unref(struct nvkm_subdev *);
int nvkm_subdev_preinit(struct nvkm_subdev *);
int nvkm_subdev_oneinit(struct nvkm_subdev *);
int nvkm_subdev_init(struct nvkm_subdev *);
int nvkm_subdev_fini(struct nvkm_subdev *, bool suspend);
int nvkm_subdev_info(struct nvkm_subdev *, u64, u64 *);
@@ -39,12 +39,9 @@ void
nvkm_engine_unref(struct nvkm_engine **pengine)
{
struct nvkm_engine *engine = *pengine;
if (engine) {
if (refcount_dec_and_mutex_lock(&engine->use.refcount, &engine->use.mutex)) {
nvkm_subdev_fini(&engine->subdev, false);
engine->use.enabled = false;
mutex_unlock(&engine->use.mutex);
}
nvkm_subdev_unref(&engine->subdev);
*pengine = NULL;
}
}
@@ -53,21 +50,13 @@ struct nvkm_engine *
nvkm_engine_ref(struct nvkm_engine *engine)
{
int ret;
if (engine) {
if (!refcount_inc_not_zero(&engine->use.refcount)) {
mutex_lock(&engine->use.mutex);
if (!refcount_inc_not_zero(&engine->use.refcount)) {
engine->use.enabled = true;
if ((ret = nvkm_subdev_init(&engine->subdev))) {
engine->use.enabled = false;
mutex_unlock(&engine->use.mutex);
return ERR_PTR(ret);
}
refcount_set(&engine->use.refcount, 1);
}
mutex_unlock(&engine->use.mutex);
}
ret = nvkm_subdev_ref(&engine->subdev);
if (ret)
return ERR_PTR(ret);
}
return engine;
}
@@ -117,26 +106,6 @@ nvkm_engine_init(struct nvkm_subdev *subdev)
struct nvkm_engine *engine = nvkm_engine(subdev);
struct nvkm_fb *fb = subdev->device->fb;
int ret = 0, i;
s64 time;
if (!engine->use.enabled) {
nvkm_trace(subdev, "init skipped, engine has no users\n");
return ret;
}
if (engine->func->oneinit && !engine->subdev.oneinit) {
nvkm_trace(subdev, "one-time init running...\n");
time = ktime_to_us(ktime_get());
ret = engine->func->oneinit(engine);
if (ret) {
nvkm_trace(subdev, "one-time init failed, %d\n", ret);
return ret;
}
engine->subdev.oneinit = true;
time = ktime_to_us(ktime_get()) - time;
nvkm_trace(subdev, "one-time init completed in %lldus\n", time);
}
if (engine->func->init)
ret = engine->func->init(engine);
@@ -146,6 +115,17 @@ nvkm_engine_init(struct nvkm_subdev *subdev)
return ret;
}
static int
nvkm_engine_oneinit(struct nvkm_subdev *subdev)
{
struct nvkm_engine *engine = nvkm_engine(subdev);
if (engine->func->oneinit)
return engine->func->oneinit(engine);
return 0;
}
static int
nvkm_engine_preinit(struct nvkm_subdev *subdev)
{
@@ -161,7 +141,6 @@ nvkm_engine_dtor(struct nvkm_subdev *subdev)
struct nvkm_engine *engine = nvkm_engine(subdev);
if (engine->func->dtor)
return engine->func->dtor(engine);
mutex_destroy(&engine->use.mutex);
return engine;
}
@@ -169,6 +148,7 @@ const struct nvkm_subdev_func
nvkm_engine = {
.dtor = nvkm_engine_dtor,
.preinit = nvkm_engine_preinit,
.oneinit = nvkm_engine_oneinit,
.init = nvkm_engine_init,
.fini = nvkm_engine_fini,
.info = nvkm_engine_info,
@@ -179,10 +159,9 @@ int
nvkm_engine_ctor(const struct nvkm_engine_func *func, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, bool enable, struct nvkm_engine *engine)
{
nvkm_subdev_ctor(&nvkm_engine, device, type, inst, &engine->subdev);
engine->func = func;
refcount_set(&engine->use.refcount, 0);
mutex_init(&engine->use.mutex);
nvkm_subdev_ctor(&nvkm_engine, device, type, inst, &engine->subdev);
refcount_set(&engine->subdev.use.refcount, 0);
if (!nvkm_boolopt(device->cfgopt, engine->subdev.name, enable)) {
nvkm_debug(&engine->subdev, "disabled\n");
@@ -54,7 +54,7 @@ int
nvkm_subdev_fini(struct nvkm_subdev *subdev, bool suspend)
{
struct nvkm_device *device = subdev->device;
const char *action = suspend ? "suspend" : "fini";
const char *action = suspend ? "suspend" : subdev->use.enabled ? "fini" : "reset";
s64 time;
nvkm_trace(subdev, "%s running...\n", action);
@@ -68,6 +68,7 @@ nvkm_subdev_fini(struct nvkm_subdev *subdev, bool suspend)
return ret;
}
}
subdev->use.enabled = false;
nvkm_mc_reset(device, subdev->type, subdev->inst);
@@ -97,30 +98,49 @@ nvkm_subdev_preinit(struct nvkm_subdev *subdev)
return 0;
}
int
nvkm_subdev_init(struct nvkm_subdev *subdev)
static int
nvkm_subdev_oneinit_(struct nvkm_subdev *subdev)
{
s64 time;
int ret;
nvkm_trace(subdev, "init running...\n");
if (!subdev->func->oneinit || subdev->oneinit)
return 0;
nvkm_trace(subdev, "one-time init running...\n");
time = ktime_to_us(ktime_get());
ret = subdev->func->oneinit(subdev);
if (ret) {
nvkm_error(subdev, "one-time init failed, %d\n", ret);
return ret;
}
if (subdev->func->oneinit && !subdev->oneinit) {
s64 time;
nvkm_trace(subdev, "one-time init running...\n");
time = ktime_to_us(ktime_get());
ret = subdev->func->oneinit(subdev);
if (ret) {
nvkm_error(subdev, "one-time init failed, %d\n", ret);
return ret;
}
subdev->oneinit = true;
time = ktime_to_us(ktime_get()) - time;
nvkm_trace(subdev, "one-time init completed in %lldus\n", time);
return 0;
}
subdev->oneinit = true;
time = ktime_to_us(ktime_get()) - time;
nvkm_trace(subdev, "one-time init completed in %lldus\n", time);
static int
nvkm_subdev_init_(struct nvkm_subdev *subdev)
{
s64 time;
int ret;
if (subdev->use.enabled) {
nvkm_trace(subdev, "init skipped, already running\n");
return 0;
}
nvkm_trace(subdev, "init running...\n");
time = ktime_to_us(ktime_get());
ret = nvkm_subdev_oneinit_(subdev);
if (ret)
return ret;
subdev->use.enabled = true;
if (subdev->func->init) {
ret = subdev->func->init(subdev);
if (ret) {
@@ -134,6 +154,64 @@ nvkm_subdev_init(struct nvkm_subdev *subdev)
return 0;
}
int
nvkm_subdev_init(struct nvkm_subdev *subdev)
{
int ret;
mutex_lock(&subdev->use.mutex);
if (refcount_read(&subdev->use.refcount) == 0) {
nvkm_trace(subdev, "init skipped, no users\n");
mutex_unlock(&subdev->use.mutex);
return 0;
}
ret = nvkm_subdev_init_(subdev);
mutex_unlock(&subdev->use.mutex);
return ret;
}
int
nvkm_subdev_oneinit(struct nvkm_subdev *subdev)
{
int ret;
mutex_lock(&subdev->use.mutex);
ret = nvkm_subdev_oneinit_(subdev);
mutex_unlock(&subdev->use.mutex);
return ret;
}
void
nvkm_subdev_unref(struct nvkm_subdev *subdev)
{
if (refcount_dec_and_mutex_lock(&subdev->use.refcount, &subdev->use.mutex)) {
nvkm_subdev_fini(subdev, false);
mutex_unlock(&subdev->use.mutex);
}
}
int
nvkm_subdev_ref(struct nvkm_subdev *subdev)
{
int ret;
if (subdev && !refcount_inc_not_zero(&subdev->use.refcount)) {
mutex_lock(&subdev->use.mutex);
if (!refcount_inc_not_zero(&subdev->use.refcount)) {
if ((ret = nvkm_subdev_init_(subdev))) {
mutex_unlock(&subdev->use.mutex);
return ret;
}
refcount_set(&subdev->use.refcount, 1);
}
mutex_unlock(&subdev->use.mutex);
}
return 0;
}
void
nvkm_subdev_del(struct nvkm_subdev **psubdev)
{
@@ -146,6 +224,7 @@ nvkm_subdev_del(struct nvkm_subdev **psubdev)
list_del(&subdev->head);
if (subdev->func->dtor)
*psubdev = subdev->func->dtor(subdev);
mutex_destroy(&subdev->use.mutex);
time = ktime_to_us(ktime_get()) - time;
nvkm_trace(subdev, "destroy completed in %lldus\n", time);
kfree(*psubdev);
@@ -167,8 +246,8 @@ nvkm_subdev_disable(struct nvkm_device *device, enum nvkm_subdev_type type, int
}
void
nvkm_subdev_ctor(const struct nvkm_subdev_func *func, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_subdev *subdev)
__nvkm_subdev_ctor(const struct nvkm_subdev_func *func, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_subdev *subdev)
{
subdev->func = func;
subdev->device = device;
@@ -180,6 +259,8 @@ nvkm_subdev_ctor(const struct nvkm_subdev_func *func, struct nvkm_device *device
else
strscpy(subdev->name, nvkm_subdev_type[type], sizeof(subdev->name));
subdev->debug = nvkm_dbgopt(device->dbgopt, subdev->name);
refcount_set(&subdev->use.refcount, 1);
list_add_tail(&subdev->head, &device->subdev);
}