Commit a7ab200a authored by Ben Skeggs

drm/nouveau/intr: add nvkm_subdev_intr() compatibility

It's quite a lot of tedious and error-prone work to switch over all the
subdevs at once, so allow an nvkm_intr to request new-style handlers to
be created that wrap the existing interfaces.

This will allow a more gradual transition.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Reviewed-by: Lyude Paul <lyude@redhat.com>
parent 3ebd64aa
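
As context for the diff below, here is a minimal, hypothetical sketch of how a chipset's interrupt table could opt in to the compatibility path this patch adds. The table name and the leaf/mask values are made up for illustration; only the .legacy field, the other nvkm_intr_data fields, and the enum values come from the patch itself.

/* Hypothetical per-chip interrupt table (not part of this patch).
 * Entries that set .legacy ask the core to auto-create a wrapper
 * handler which simply calls nvkm_subdev_intr() for that subdev.
 */
static const struct nvkm_intr_data
example_intr_data[] = {
	{ .type = NVKM_ENGINE_DISP, .inst = 0, .leaf = 0, .mask = 0x04000000, .legacy = true }, /* assumed leaf/mask */
	{ .type = NVKM_SUBDEV_TOP, .inst = 0, .leaf = 0, .mask = 0x00000fff, .legacy = true }, /* assumed leaf/mask */
	{} /* mask == 0 terminates the table */
};

Entries of type NVKM_SUBDEV_TOP are fanned out per nvkm_top_device entry by nvkm_intr_subdev_add() in the diff below.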
@@ -69,6 +69,7 @@ struct nvkm_device {
 		int irq;
 		bool alloc;
 		bool armed;
+		bool legacy_done;
 	} intr;
 };
...
@@ -30,6 +30,7 @@ struct nvkm_intr {
 		int inst;
 		int leaf;
 		u32 mask; /* 0-terminated. */
+		bool legacy; /* auto-create "legacy" nvkm_subdev_intr() handler */
 	} *data;
 
 	struct nvkm_subdev *subdev;
...
@@ -21,6 +21,8 @@ struct nvkm_subdev {
 	u32 debug;
 	struct list_head head;
 
+	struct nvkm_inth inth;
+
 	void **pself;
 	bool oneinit;
 };
...
@@ -265,12 +265,73 @@ nvkm_intr_add(const struct nvkm_intr_func *func, const struct nvkm_intr_data *da
 	return 0;
 }
 
+static irqreturn_t
+nvkm_intr_subdev(struct nvkm_inth *inth)
+{
+	struct nvkm_subdev *subdev = container_of(inth, typeof(*subdev), inth);
+
+	nvkm_subdev_intr(subdev);
+	return IRQ_HANDLED;
+}
+
+static void
+nvkm_intr_subdev_add_dev(struct nvkm_intr *intr, enum nvkm_subdev_type type, int inst)
+{
+	struct nvkm_subdev *subdev;
+	enum nvkm_intr_prio prio;
+	int ret;
+
+	subdev = nvkm_device_subdev(intr->subdev->device, type, inst);
+	if (!subdev || !subdev->func->intr)
+		return;
+
+	if (type == NVKM_ENGINE_DISP)
+		prio = NVKM_INTR_PRIO_VBLANK;
+	else
+		prio = NVKM_INTR_PRIO_NORMAL;
+
+	ret = nvkm_inth_add(intr, NVKM_INTR_SUBDEV, prio, subdev, nvkm_intr_subdev, &subdev->inth);
+	if (WARN_ON(ret))
+		return;
+
+	nvkm_inth_allow(&subdev->inth);
+}
+
+static void
+nvkm_intr_subdev_add(struct nvkm_intr *intr)
+{
+	const struct nvkm_intr_data *data;
+	struct nvkm_device *device = intr->subdev->device;
+	struct nvkm_top_device *tdev;
+
+	for (data = intr->data; data && data->mask; data++) {
+		if (data->legacy) {
+			if (data->type == NVKM_SUBDEV_TOP) {
+				list_for_each_entry(tdev, &device->top->device, head) {
+					if (tdev->intr < 0 || !(data->mask & BIT(tdev->intr)))
+						continue;
+
+					nvkm_intr_subdev_add_dev(intr, tdev->type, tdev->inst);
+				}
+			} else {
+				nvkm_intr_subdev_add_dev(intr, data->type, data->inst);
+			}
+		}
+	}
+}
+
 void
 nvkm_intr_rearm(struct nvkm_device *device)
 {
 	struct nvkm_intr *intr;
 	int i;
 
+	if (unlikely(!device->intr.legacy_done)) {
+		list_for_each_entry(intr, &device->intr.intr, head)
+			nvkm_intr_subdev_add(intr);
+		device->intr.legacy_done = true;
+	}
+
 	spin_lock_irq(&device->intr.lock);
 	list_for_each_entry(intr, &device->intr.intr, head) {
 		for (i = 0; intr->func->block && i < intr->leaves; i++) {
...
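
For completeness, a hedged sketch of what the wrapper reaches on the other side: an existing old-style subdev implementation stays unchanged, because nvkm_intr_subdev() above simply forwards to nvkm_subdev_intr(), which dispatches to the subdev's ->intr() hook. The handler name, register offset, and ack sequence below are invented for illustration and are not part of this patch.

/* Hypothetical old-style handler (not from this patch); with .legacy set,
 * the core reaches it via nvkm_intr_subdev() -> nvkm_subdev_intr() ->
 * subdev->func->intr().
 */
static void
example_subdev_intr(struct nvkm_subdev *subdev)
{
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x000100); /* assumed status register */

	if (stat)
		nvkm_wr32(device, 0x000100, stat); /* assumed write-to-clear ack */
}

static const struct nvkm_subdev_func
example_subdev_func = {
	.intr = example_subdev_intr, /* old-style hook, untouched by the transition */
};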