Commit 3517e6b6 authored by Ben Skeggs, committed by Dave Airlie

drm/nouveau/disp: group supervisor-related struct members

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Reviewed-by: Lyude Paul <lyude@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
parent acbe9ecf
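
For context: the refactor folds the three supervisor-related fields of struct nvkm_disp (the workqueue pointer, the deferred work item, and the latched pending bits) into one nested `super` member, which is why every `disp->wq`, `disp->supervisor` and `disp->super` access in the diff below becomes `disp->super.wq`, `disp->super.work` and `disp->super.pending`. The sketch that follows is a minimal, self-contained illustration of the same interrupt-latch-then-queue-work pattern against the stock Linux workqueue API; the `my_disp` type, function names and register bits are illustrative placeholders, not nouveau code.

/* Minimal sketch of the pattern (illustrative names, not the driver code). */
#include <linux/errno.h>      /* -ENOMEM */
#include <linux/kernel.h>     /* container_of() */
#include <linux/types.h>      /* u32 */
#include <linux/workqueue.h>  /* workqueue API */

struct my_disp {
        struct {
                struct workqueue_struct *wq; /* single-threaded queue */
                struct work_struct work;     /* deferred supervisor handler */
                u32 pending;                 /* latched supervisor bits */
        } super;
};

/* Work handler: recover the container from the embedded work_struct. */
static void my_disp_super(struct work_struct *work)
{
        struct my_disp *disp = container_of(work, struct my_disp, super.work);

        if (disp->super.pending & 0x00000001) {
                /* ... stage-1 supervisor handling ... */
        }
}

/* Interrupt path: latch the pending bits, then defer to the workqueue. */
static void my_disp_intr(struct my_disp *disp, u32 stat)
{
        disp->super.pending = stat & 0x00000007;
        queue_work(disp->super.wq, &disp->super.work);
}

static int my_disp_ctor(struct my_disp *disp)
{
        disp->super.wq = create_singlethread_workqueue("my-disp");
        if (!disp->super.wq)
                return -ENOMEM;

        INIT_WORK(&disp->super.work, my_disp_super);
        return 0;
}

static void my_disp_dtor(struct my_disp *disp)
{
        if (disp->super.wq)
                destroy_workqueue(disp->super.wq);
}

The single-threaded workqueue serialises supervisor stages, and latching the bits before queue_work() lets the handler see which stage fired even though it runs later. Grouping all three fields under one member keeps that state together, exactly as the diff does for nvkm_disp.
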
@@ -18,9 +18,11 @@ struct nvkm_disp {
 	struct nvkm_event hpd;
 	struct nvkm_event vblank;
 
-	struct workqueue_struct *wq;
-	struct work_struct supervisor;
-	u32 super;
+	struct {
+		struct workqueue_struct *wq;
+		struct work_struct work;
+		u32 pending;
+	} super;
 
 	struct nvkm_event uevent;
@@ -983,19 +983,19 @@ gf119_disp_core_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
 void
 gf119_disp_super(struct work_struct *work)
 {
-	struct nvkm_disp *disp = container_of(work, struct nvkm_disp, supervisor);
+	struct nvkm_disp *disp = container_of(work, struct nvkm_disp, super.work);
 	struct nvkm_subdev *subdev = &disp->engine.subdev;
 	struct nvkm_device *device = subdev->device;
 	struct nvkm_head *head;
 	u32 mask[4];
 
-	nvkm_debug(subdev, "supervisor %d\n", ffs(disp->super));
+	nvkm_debug(subdev, "supervisor %d\n", ffs(disp->super.pending));
 	list_for_each_entry(head, &disp->heads, head) {
 		mask[head->id] = nvkm_rd32(device, 0x6101d4 + (head->id * 0x800));
 		HEAD_DBG(head, "%08x", mask[head->id]);
 	}
 
-	if (disp->super & 0x00000001) {
+	if (disp->super.pending & 0x00000001) {
 		nv50_disp_chan_mthd(disp->chan[0], NV_DBG_DEBUG);
 		nv50_disp_super_1(disp);
 		list_for_each_entry(head, &disp->heads, head) {
@@ -1004,7 +1004,7 @@ gf119_disp_super(struct work_struct *work)
 			nv50_disp_super_1_0(disp, head);
 		}
 	} else
-	if (disp->super & 0x00000002) {
+	if (disp->super.pending & 0x00000002) {
 		list_for_each_entry(head, &disp->heads, head) {
 			if (!(mask[head->id] & 0x00001000))
 				continue;
@@ -1022,7 +1022,7 @@ gf119_disp_super(struct work_struct *work)
 			nv50_disp_super_2_2(disp, head);
 		}
 	} else
-	if (disp->super & 0x00000004) {
+	if (disp->super.pending & 0x00000004) {
 		list_for_each_entry(head, &disp->heads, head) {
 			if (!(mask[head->id] & 0x00001000))
 				continue;
@@ -1096,9 +1096,9 @@ gf119_disp_intr(struct nvkm_disp *disp)
 	if (intr & 0x00100000) {
 		u32 stat = nvkm_rd32(device, 0x6100ac);
 		if (stat & 0x00000007) {
-			disp->super = (stat & 0x00000007);
-			queue_work(disp->wq, &disp->supervisor);
-			nvkm_wr32(device, 0x6100ac, disp->super);
+			disp->super.pending = (stat & 0x00000007);
+			queue_work(disp->super.wq, &disp->super.work);
+			nvkm_wr32(device, 0x6100ac, disp->super.pending);
 			stat &= ~0x00000007;
 		}
@@ -897,20 +897,20 @@ gv100_disp_caps_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
 void
 gv100_disp_super(struct work_struct *work)
 {
-	struct nvkm_disp *disp = container_of(work, struct nvkm_disp, supervisor);
+	struct nvkm_disp *disp = container_of(work, struct nvkm_disp, super.work);
 	struct nvkm_subdev *subdev = &disp->engine.subdev;
 	struct nvkm_device *device = subdev->device;
 	struct nvkm_head *head;
 	u32 stat = nvkm_rd32(device, 0x6107a8);
 	u32 mask[4];
 
-	nvkm_debug(subdev, "supervisor %d: %08x\n", ffs(disp->super), stat);
+	nvkm_debug(subdev, "supervisor %d: %08x\n", ffs(disp->super.pending), stat);
 	list_for_each_entry(head, &disp->heads, head) {
 		mask[head->id] = nvkm_rd32(device, 0x6107ac + (head->id * 4));
 		HEAD_DBG(head, "%08x", mask[head->id]);
 	}
 
-	if (disp->super & 0x00000001) {
+	if (disp->super.pending & 0x00000001) {
 		nv50_disp_chan_mthd(disp->chan[0], NV_DBG_DEBUG);
 		nv50_disp_super_1(disp);
 		list_for_each_entry(head, &disp->heads, head) {
@@ -919,7 +919,7 @@ gv100_disp_super(struct work_struct *work)
 			nv50_disp_super_1_0(disp, head);
 		}
 	} else
-	if (disp->super & 0x00000002) {
+	if (disp->super.pending & 0x00000002) {
 		list_for_each_entry(head, &disp->heads, head) {
 			if (!(mask[head->id] & 0x00001000))
 				continue;
@@ -937,7 +937,7 @@ gv100_disp_super(struct work_struct *work)
 			nv50_disp_super_2_2(disp, head);
 		}
 	} else
-	if (disp->super & 0x00000004) {
+	if (disp->super.pending & 0x00000004) {
 		list_for_each_entry(head, &disp->heads, head) {
 			if (!(mask[head->id] & 0x00001000))
 				continue;
@@ -1000,9 +1000,9 @@ gv100_disp_intr_ctrl_disp(struct nvkm_disp *disp)
 	u32 stat = nvkm_rd32(device, 0x611c30);
 
 	if (stat & 0x00000007) {
-		disp->super = (stat & 0x00000007);
-		queue_work(disp->wq, &disp->supervisor);
-		nvkm_wr32(device, 0x611860, disp->super);
+		disp->super.pending = (stat & 0x00000007);
+		queue_work(disp->super.wq, &disp->super.work);
+		nvkm_wr32(device, 0x611860, disp->super.pending);
 		stat &= ~0x00000007;
 	}
@@ -1521,15 +1521,15 @@ nv50_disp_super_1(struct nvkm_disp *disp)
 void
 nv50_disp_super(struct work_struct *work)
 {
-	struct nvkm_disp *disp = container_of(work, struct nvkm_disp, supervisor);
+	struct nvkm_disp *disp = container_of(work, struct nvkm_disp, super.work);
 	struct nvkm_subdev *subdev = &disp->engine.subdev;
 	struct nvkm_device *device = subdev->device;
 	struct nvkm_head *head;
 	u32 super = nvkm_rd32(device, 0x610030);
 
-	nvkm_debug(subdev, "supervisor %08x %08x\n", disp->super, super);
+	nvkm_debug(subdev, "supervisor %08x %08x\n", disp->super.pending, super);
 
-	if (disp->super & 0x00000010) {
+	if (disp->super.pending & 0x00000010) {
 		nv50_disp_chan_mthd(disp->chan[0], NV_DBG_DEBUG);
 		nv50_disp_super_1(disp);
 		list_for_each_entry(head, &disp->heads, head) {
@@ -1540,7 +1540,7 @@ nv50_disp_super(struct work_struct *work)
 			nv50_disp_super_1_0(disp, head);
 		}
 	} else
-	if (disp->super & 0x00000020) {
+	if (disp->super.pending & 0x00000020) {
 		list_for_each_entry(head, &disp->heads, head) {
 			if (!(super & (0x00000080 << head->id)))
 				continue;
@@ -1558,7 +1558,7 @@ nv50_disp_super(struct work_struct *work)
 			nv50_disp_super_2_2(disp, head);
 		}
 	} else
-	if (disp->super & 0x00000040) {
+	if (disp->super.pending & 0x00000040) {
 		list_for_each_entry(head, &disp->heads, head) {
 			if (!(super & (0x00000080 << head->id)))
 				continue;
@@ -1651,9 +1651,9 @@ nv50_disp_intr(struct nvkm_disp *disp)
 	}
 
 	if (intr1 & 0x00000070) {
-		disp->super = (intr1 & 0x00000070);
-		queue_work(disp->wq, &disp->supervisor);
-		nvkm_wr32(device, 0x610024, disp->super);
+		disp->super.pending = (intr1 & 0x00000070);
+		queue_work(disp->super.wq, &disp->super.work);
+		nvkm_wr32(device, 0x610024, disp->super.pending);
 	}
 }
@@ -1795,8 +1795,8 @@ nv50_disp_dtor(struct nvkm_disp *disp)
 	nvkm_gpuobj_del(&disp->inst);
 	nvkm_event_fini(&disp->uevent);
 
-	if (disp->wq)
-		destroy_workqueue(disp->wq);
+	if (disp->super.wq)
+		destroy_workqueue(disp->super.wq);
 
 	return disp;
 }
@@ -1841,11 +1841,11 @@ nv50_disp_new_(const struct nvkm_disp_func *func, struct nvkm_device *device,
 	if (ret)
 		return ret;
 
-	disp->wq = create_singlethread_workqueue("nvkm-disp");
-	if (!disp->wq)
+	disp->super.wq = create_singlethread_workqueue("nvkm-disp");
+	if (!disp->super.wq)
 		return -ENOMEM;
 
-	INIT_WORK(&disp->supervisor, func->super);
+	INIT_WORK(&disp->super.work, func->super);
 
 	return nvkm_event_init(func->uevent, 1, ARRAY_SIZE(disp->chan),
 			       &disp->uevent);